
Python BP Algorithm Template


Below is a simple Python template for the BP (back propagation) algorithm:

```
import numpy as np

def sigmoid(x):
    """Sigmoid activation function."""
    return 1 / (1 + np.exp(-x))

def sigmoid_prime(x):
    """Derivative of the sigmoid function."""
    return sigmoid(x) * (1 - sigmoid(x))

class Network():
    def __init__(self, sizes):
        self.num_layers = len(sizes)  # number of layers in the network
        self.sizes = sizes            # number of neurons in each layer
        # Randomly initialize biases and weights (the input layer has neither)
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Forward pass: propagate the input a through every layer."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def backprop(self, x, y):
        """Backpropagation: return the gradients of the cost with respect to
        the biases and weights for a single training example (x, y)."""
        grad_b = [np.zeros(b.shape) for b in self.biases]
        grad_w = [np.zeros(w.shape) for w in self.weights]

        # Forward pass, recording the activations and weighted inputs of each layer
        activation = x
        activations = [x]  # activation of every layer
        zs = []            # weighted input z = w·a + b of every layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)

        # Backward pass: error of the output layer (quadratic cost)
        delta = (activations[-1] - y) * sigmoid_prime(zs[-1])
        grad_b[-1] = delta
        grad_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error back through the hidden layers
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            grad_b[-l] = delta
            grad_w[-l] = np.dot(delta, activations[-l - 1].transpose())

        return (grad_b, grad_w)

    def mini_batch_sgd(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        """Train the network using mini-batch stochastic gradient descent."""
        n = len(training_data)
        for j in range(epochs):
            np.random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                n_test = len(test_data)
                print("Epoch {0}: {1} / {2}".format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Apply one gradient-descent step using the gradients averaged over a mini-batch."""
        grad_b = [np.zeros(b.shape) for b in self.biases]
        grad_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_grad_b, delta_grad_w = self.backprop(x, y)
            grad_b = [gb + dgb for gb, dgb in zip(grad_b, delta_grad_b)]
            grad_w = [gw + dgw for gw, dgw in zip(grad_w, delta_grad_w)]
        self.weights = [w - (eta / len(mini_batch)) * gw
                        for w, gw in zip(self.weights, grad_w)]
        self.biases = [b - (eta / len(mini_batch)) * gb
                       for b, gb in zip(self.biases, grad_b)]

    def evaluate(self, test_data):
        """Return the number of test examples classified correctly; the prediction
        is the index of the output neuron with the highest activation."""
        test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
```
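
The output-layer error `(activations[-1] - y) * sigmoid_prime(zs[-1])` corresponds to the quadratic cost C = ½‖a − y‖². To verify the analytic gradients, you can compare them against numerical finite differences; the helper below is a verification sketch added for illustration, not part of the original template:

```
def grad_check(net, x, y, eps=1e-5):
    """Compare backprop's gradient for one weight against a central
    finite difference of the quadratic cost C = 0.5 * ||a - y||^2.
    (Illustrative helper; net is a Network instance from the template.)"""
    cost = lambda: 0.5 * np.sum((net.feedforward(x) - y) ** 2)
    grad_b, grad_w = net.backprop(x, y)
    i, j = 0, 0  # check a single first-layer weight for brevity
    net.weights[0][i, j] += eps
    c_plus = cost()
    net.weights[0][i, j] -= 2 * eps
    c_minus = cost()
    net.weights[0][i, j] += eps  # restore the original weight
    numeric = (c_plus - c_minus) / (2 * eps)
    print("backprop: {0:.6f}  numeric: {1:.6f}".format(grad_w[0][i, j], numeric))
```

If the implementation is correct, the two printed values should agree to several decimal places.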

Usage:

1. Create the network

```
net = Network([input_size, hidden_size, output_size])
```
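
For example, a MNIST-style digit classifier might use the following sizes (illustrative values, not from the original):

```
net = Network([784, 30, 10])  # 784 input pixels, 30 hidden neurons, 10 classes
```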

2. Train the network

```
net.mini_batch_sgd(training_data, epochs, mini_batch_size, eta, test_data)
```

Here `training_data` is the training set, `test_data` is an optional test set, `epochs` is the number of training epochs, `mini_batch_size` is the size of each mini-batch, and `eta` is the learning rate.
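
Note that the template expects each training example to be a tuple `(x, y)`, where `x` is a column vector of shape `(input_size, 1)` and `y` is a one-hot column vector of shape `(output_size, 1)`; `evaluate` instead expects `test_data` to pair each `x` with an integer class label. A minimal end-to-end sketch on toy XOR data (the data, network sizes, and hyperparameters are illustrative assumptions):

```
import numpy as np

# Inputs as (2, 1) column vectors; targets follow the XOR truth table
inputs = [np.array([[0.0], [0.0]]), np.array([[0.0], [1.0]]),
          np.array([[1.0], [0.0]]), np.array([[1.0], [1.0]])]
labels = [0, 1, 1, 0]

def one_hot(d):
    return np.eye(2)[:, [d]]  # integer label -> (2, 1) one-hot column

training_data = [(x, one_hot(d)) for x, d in zip(inputs, labels)]
test_data = list(zip(inputs, labels))  # evaluate() compares against integer labels

net = Network([2, 4, 2])
net.mini_batch_sgd(training_data, epochs=2000, mini_batch_size=4, eta=3.0,
                   test_data=test_data)
```

With settings like these the network usually reaches 4 / 4 on the XOR test set, though convergence depends on the random initialization.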

3. Make predictions with the network

```
prediction = net.feedforward(input_data)
```
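
`feedforward` returns the output layer's activation vector, so for a classifier the predicted class is the index of its largest entry (a short sketch, continuing the illustrative XOR setup above):

```
output = net.feedforward(np.array([[1.0], [0.0]]))  # (2, 1) activation vector
predicted_class = np.argmax(output)                 # index of the strongest output
```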
