Building a Neural Network: a Handwritten Digit Recognition Example
2022-01-28 11:41:00 【磁生電】
Network construction:
multilayer_perceptron.py:
import numpy as np
from utils.features import prepare_for_training  # data preprocessing (normalization)
from utils.hypothesis import sigmoid, sigmoid_gradient  # the sigmoid function and its derivative
class MultilayerPerceptron:
    def __init__(self, data, labels, layers, normalize_data=False):
        # Call the data-preprocessing function
        data_processed = prepare_for_training(data, normalize_data=normalize_data)[0]
        # Assign the initial values
        self.data = data_processed
        self.labels = labels
        self.layers = layers  # e.g. 784, 25, 10
        self.normalize_data = normalize_data
        self.thetas = MultilayerPerceptron.thetas_init(layers)  # initialize the weights
        # Layer 1 is the input layer; the number of input pixels is
        # 28*28*1 (height, width, color channels) -- fixed
        # Layer 2: hidden layer with 25 neurons (maps the 784 features
        # to a 25-dimensional vector) -- tunable
        # Layer 3: output layer for the 10-class task
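        # For reference, the parameter count of this architecture:
        #   layer 1 -> 2: 25 * (784 + 1) = 19625 weights (the +1 is the bias)
        #   layer 2 -> 3: 10 * (25 + 1)  =   260 weights
        #   so the unrolled theta vector used below has 19885 entries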
    def predict(self, data):
        data_processed = prepare_for_training(data, normalize_data=self.normalize_data)[0]
        num_examples = data_processed.shape[0]
        predictions = MultilayerPerceptron.feedforward_propagation(data_processed, self.thetas, self.layers)
        return np.argmax(predictions, axis=1).reshape((num_examples, 1))  # index of the most probable class
    # Training: update the parameters; a neural network is also fit with an
    # optimization algorithm (takes the maximum number of iterations
    # max_iterations and the learning rate alpha)
    def train(self, max_iterations=1000, alpha=0.1):
        # To make the weight updates easier, unroll the weight matrices into
        # one vector (they are rolled back into matrices afterwards)
        unrolled_theta = MultilayerPerceptron.thetas_unroll(self.thetas)
        # Run gradient descent to update the parameters
        (optimized_theta, cost_history) = MultilayerPerceptron.gradient_descent(
            self.data, self.labels, unrolled_theta, self.layers, max_iterations, alpha)
        # Roll the result back into matrices with thetas_roll
        self.thetas = MultilayerPerceptron.thetas_roll(optimized_theta, self.layers)
        return self.thetas, cost_history
    # Weight matrix initialization (takes the list of layer sizes)
    @staticmethod
    def thetas_init(layers):
        num_layers = len(layers)  # layers is a list
        thetas = {}  # dictionary holding one weight matrix per layer transition
        # Assign the weights transition by transition. With the 3 layers
        # 784, 25, 10, only two transitions need weights, so the loop runs
        # twice and produces 25*785 and 10*26 matrices (785 and 26 include
        # the bias term).
        for layer_index in range(num_layers - 1):
            in_count = layers[layer_index]
            out_count = layers[layer_index + 1]
            # The +1 accounts for the bias term; note that the number of
            # biases matches the number of outputs
            thetas[layer_index] = np.random.rand(out_count, in_count + 1) * 0.05  # random init; keep the values small
        return thetas
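    # Why random and small: if every weight started at the same value, all
    # hidden units would compute the same function and receive the same
    # gradient; small random values break that symmetry while keeping the
    # sigmoid in its responsive range.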
    # Concatenate the matrices into a single vector
    @staticmethod
    def thetas_unroll(thetas):
        num_theta_layers = len(thetas)
        unrolled_theta = np.array([])
        for theta_layer_index in range(num_theta_layers):
            # flatten() only applies to NumPy objects (array or mat), not to
            # plain Python lists; a.flatten() collapses the array a to 1-D,
            # by default in row-major order
            unrolled_theta = np.hstack((unrolled_theta, thetas[theta_layer_index].flatten()))
        return unrolled_theta
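    # The layer matrices are laid out back to back: the 25*785 matrix is
    # flattened row by row first, followed by the 10*26 matrix; this is
    # exactly the layout thetas_roll below relies on to reverse the process.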
    # Gradient descent: updates the parameters via forward and backward passes.
    # Steps: compute the loss value, compute the gradient from the loss,
    # apply the gradient update, and return the optimized theta
    @staticmethod
    def gradient_descent(data, labels, unrolled_theta, layers, max_iterations, alpha):
        optimized_theta = unrolled_theta
        cost_history = []
        for _ in range(max_iterations):
            cost = MultilayerPerceptron.cost_function(
                data, labels, MultilayerPerceptron.thetas_roll(optimized_theta, layers), layers)
            cost_history.append(cost)
            theta_gradient = MultilayerPerceptron.gradient_step(data, labels, optimized_theta, layers)  # compute the gradient of theta
            optimized_theta = optimized_theta - alpha * theta_gradient  # update theta
        return optimized_theta, cost_history
    # Compute the gradient of theta:
    @staticmethod
    def gradient_step(data, labels, optimized_theta, layers):
        theta = MultilayerPerceptron.thetas_roll(optimized_theta, layers)  # restore the vector to matrices
        thetas_rolled_gradients = MultilayerPerceptron.back_propagation(data, labels, theta, layers)  # run backpropagation
        thetas_unrolled_gradients = MultilayerPerceptron.thetas_unroll(thetas_rolled_gradients)
        return thetas_unrolled_gradients
    # Backpropagation:
    @staticmethod
    def back_propagation(data, labels, thetas, layers):
        num_layers = len(layers)
        (num_examples, num_features) = data.shape  # e.g. (1700, 785) with the bias column
        num_label_types = layers[-1]
        deltas = {}
        # Initialize the gradient accumulators
        for layer_index in range(num_layers - 1):
            in_count = layers[layer_index]
            out_count = layers[layer_index + 1]
            deltas[layer_index] = np.zeros((out_count, in_count + 1))  # 25*785, 10*26
        for example_index in range(num_examples):
            layers_inputs = {}
            layers_activations = {}
            layers_activation = data[example_index, :].reshape((num_features, 1))  # 785*1
            layers_activations[0] = layers_activation
            # Forward pass, layer by layer
            for layer_index in range(num_layers - 1):
                layer_theta = thetas[layer_index]  # current weights: 25*785, then 10*26
                layer_input = np.dot(layer_theta, layers_activation)  # 25*1 the first time, 10*1 the second
                layers_activation = np.vstack((np.array([[1]]), sigmoid(layer_input)))
                layers_inputs[layer_index + 1] = layer_input  # pre-activation of the next layer
                layers_activations[layer_index + 1] = layers_activation  # activation of the next layer
            output_layer_activation = layers_activation[1:, :]
            delta = {}
            # Build the one-hot label
            bitwise_label = np.zeros((num_label_types, 1))
            bitwise_label[labels[example_index][0]] = 1
            # Difference between the output layer and the true label
            delta[num_layers - 1] = output_layer_activation - bitwise_label
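            # These are the standard backprop recurrences (g = sigmoid,
            # z the pre-activation):
            #   output layer:  delta(L) = a(L) - y                          (the line above)
            #   hidden layers: delta(l) = Theta(l)^T delta(l+1) * g'(z(l))  (elementwise, loop below)
            #   accumulation:  Delta(l) += delta(l+1) a(l)^T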
            # Walk backwards over the layers: L, L-1, L-2, ..., 2
            for layer_index in range(num_layers - 2, 0, -1):
                layer_theta = thetas[layer_index]
                next_delta = delta[layer_index + 1]
                layer_input = layers_inputs[layer_index]
                layer_input = np.vstack((np.array([[1]]), layer_input))
                # Apply the recurrence
                delta[layer_index] = np.dot(layer_theta.T, next_delta) * sigmoid_gradient(layer_input)
                # Drop the bias component
                delta[layer_index] = delta[layer_index][1:, :]
            # Accumulate this example's contribution to the gradients
            for layer_index in range(num_layers - 1):
                layer_delta = np.dot(delta[layer_index + 1], layers_activations[layer_index].T)
                deltas[layer_index] = deltas[layer_index] + layer_delta  # 25*785 the first time, 10*26 the second
        # Average over all examples
        for layer_index in range(num_layers - 1):
            deltas[layer_index] = deltas[layer_index] * (1 / num_examples)
        return deltas
    # Loss function
    @staticmethod
    def cost_function(data, labels, thetas, layers):
        num_layers = len(layers)  # number of layers
        num_examples = data.shape[0]  # number of examples
        num_labels = layers[-1]  # the label count is the size of the last layer
        # One forward pass
        predictions = MultilayerPerceptron.feedforward_propagation(data, thetas, layers)
        # Build the labels: every example's label must be one-hot
        bitwise_labels = np.zeros((num_examples, num_labels))
        for example_index in range(num_examples):
            bitwise_labels[example_index][labels[example_index][0]] = 1
        bit_set_cost = np.sum(np.log(predictions[bitwise_labels == 1]))
        bit_not_set_cost = np.sum(np.log(1 - predictions[bitwise_labels == 0]))
        cost = (-1 / num_examples) * (bit_set_cost + bit_not_set_cost)
        return cost
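    # The value computed above is the (unregularized) cross-entropy
    #   J = -(1/m) * sum over examples i and classes k of
    #       y[i,k]*log(h[i,k]) + (1 - y[i,k])*log(1 - h[i,k])
    # where h is the network output and y the one-hot label matrix.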
    # Forward propagation
    @staticmethod
    def feedforward_propagation(data, thetas, layers):
        num_layers = len(layers)
        num_examples = data.shape[0]
        in_layer_activation = data  # input data
        # Layer by layer
        for layer_index in range(num_layers - 1):
            theta = thetas[layer_index]
            out_layer_activation = sigmoid(np.dot(in_layer_activation, theta.T))
            # The plain product is num_examples*25; prepend the bias column
            # to make it num_examples*26
            out_layer_activation = np.hstack((np.ones((num_examples, 1)), out_layer_activation))
            in_layer_activation = out_layer_activation
        # Return the output layer, without the bias column
        return in_layer_activation[:, 1:]
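    # Note that feedforward_propagation processes the whole batch at once,
    # with one example per row, while back_propagation above loops over the
    # examples one at a time using column vectors; the two conventions
    # compute the same activations.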
    # Restore the matrices from the unrolled vector
    @staticmethod
    def thetas_roll(unrolled_thetas, layers):
        num_layers = len(layers)
        thetas = {}  # a dictionary makes it easy to index each layer's matrix
        unrolled_shift = 0  # running offset recording how far into the vector we have read
        for layer_index in range(num_layers - 1):
            in_count = layers[layer_index]
            out_count = layers[layer_index + 1]
            thetas_width = in_count + 1
            thetas_height = out_count
            thetas_volume = thetas_width * thetas_height
            start_index = unrolled_shift
            end_index = unrolled_shift + thetas_volume
            layer_theta_unrolled = unrolled_thetas[start_index:end_index]
            thetas[layer_index] = layer_theta_unrolled.reshape((thetas_height, thetas_width))
            unrolled_shift = unrolled_shift + thetas_volume
        return thetas
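A quick way to convince yourself that thetas_unroll and thetas_roll are exact inverses is a round-trip check. This is a minimal sketch, not part of the original files, assuming multilayer_perceptron.py is on the import path:

import numpy as np
from multilayer_perceptron import MultilayerPerceptron

layers = [784, 25, 10]
thetas = MultilayerPerceptron.thetas_init(layers)  # two random matrices: 25*785 and 10*26
unrolled = MultilayerPerceptron.thetas_unroll(thetas)
print(unrolled.shape)  # (19885,) for this architecture
restored = MultilayerPerceptron.thetas_roll(unrolled, layers)
for layer_index in thetas:
    assert np.allclose(thetas[layer_index], restored[layer_index])  # values survive the round trip
print('round trip OK')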
Training on the dataset:
mnist.py:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from multilayer_perceptron import MultilayerPerceptron

data = pd.read_csv('../neural_network/data/mnist-demo.csv')
# Preview some of the digits
numbers_to_display = 25  # number of images to show
num_cells = math.ceil(math.sqrt(numbers_to_display))
plt.figure(figsize=(10, 10))
for plot_index in range(numbers_to_display):
    digit = data[plot_index:plot_index + 1].values
    digit_label = digit[0][0]
    digit_pixels = digit[0][1:]
    image_size = int(math.sqrt(digit_pixels.shape[0]))
    frame = digit_pixels.reshape((image_size, image_size))
    plt.subplot(num_cells, num_cells, plot_index + 1)
    plt.imshow(frame, cmap='Purples')
    plt.title(digit_label)
plt.subplots_adjust(wspace=0.5, hspace=0.5)
plt.show()
train_data = data.sample(frac=0.8)
test_data = data.drop(train_data.index)
train_data = train_data.values
test_data = test_data.values

num_training_examples = 5000
x_train = train_data[:num_training_examples, 1:]
y_train = train_data[:num_training_examples, [0]]
x_test = test_data[:, 1:]
y_test = test_data[:, [0]]

layers = [784, 25, 10]
normalize_data = True
max_iterations = 500
alpha = 0.1
multilayer_perceptron = MultilayerPerceptron(x_train, y_train, layers, normalize_data)
(thetas, costs) = multilayer_perceptron.train(max_iterations, alpha)
plt.plot(range(len(costs)), costs)
plt.xlabel('Gradient steps')
plt.ylabel('costs')
plt.show()

y_train_predictions = multilayer_perceptron.predict(x_train)
y_test_predictions = multilayer_perceptron.predict(x_test)
train_p = np.sum(y_train_predictions == y_train) / y_train.shape[0] * 100
test_p = np.sum(y_test_predictions == y_test) / y_test.shape[0] * 100
print('Training set accuracy:', train_p)
print('Test set accuracy:', test_p)
# Visualize the test predictions: a green title means correct, red means wrong
numbers_to_display = 64
num_cells = math.ceil(math.sqrt(numbers_to_display))
plt.figure(figsize=(15, 15))
for plot_index in range(numbers_to_display):
    digit_label = y_test[plot_index, 0]
    digit_pixels = x_test[plot_index, :]
    predicted_label = y_test_predictions[plot_index][0]
    image_size = int(math.sqrt(digit_pixels.shape[0]))
    frame = digit_pixels.reshape((image_size, image_size))
    color_map = 'Greens' if predicted_label == digit_label else 'Reds'
    plt.subplot(num_cells, num_cells, plot_index + 1)
    plt.imshow(frame, cmap=color_map)
    plt.title(predicted_label)
    plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)
plt.subplots_adjust(hspace=0.5, wspace=0.5)
plt.show()
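A per-class breakdown of the test accuracy can be computed from the same arrays; a minimal sketch reusing y_test and y_test_predictions from above (this snippet is not in the original post):

for digit in range(10):
    mask = (y_test[:, 0] == digit)  # test rows whose true label is this digit
    if mask.sum() > 0:
        digit_accuracy = np.sum(y_test_predictions[mask, 0] == digit) / mask.sum() * 100
        print('digit', digit, 'accuracy:', digit_accuracy)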
Results: (figures from the original post: the digit preview grid, the training-cost curve, and the green/red test-prediction grid.)
Copyright notice
This article was written by [磁生電]; please include a link to the original when reposting. Thanks.
https://cht.chowdera.com/2022/01/202201281140596734.html