0
点赞
收藏
分享

微信扫一扫

神经网络简单代码示例


  • 通过神经网络对鸢尾花进行预测
  • 在训练中使用前向传播进行预测,接着计算误差,结合误差进行反向传播更新各个位置上的权重

from dataclasses import dataclass, field
import pandas as pd
from sklearn.datasets import load_iris
import numpy as np
from sklearn.model_selection import train_test_split

# Load the iris dataset at import time.
# NOTE(review): these module-level globals are never used below — main()
# reloads the exact same data itself; consider removing this import-time I/O.
ds = load_iris(as_frame=True, return_X_y=True)
data = ds[0].iloc[:100, :]  # first 100 rows -> iris classes 0 and 1 only
target = ds[1][:100]  # matching binary labels

def sigmoid(x):
    """Logistic activation: squash any real input into the open interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of the sigmoid's OUTPUT.

    If s = sigmoid(z), then ds/dz = s * (1 - s). Callers pass the already
    activated value ``x = s``, not the pre-activation ``z``.
    """
    one_minus = 1 - x
    return x * one_minus

@dataclass
class Neuron:
    """A single sigmoid neuron that caches its most recent forward pass.

    ``weights`` is assigned externally by the network (a numpy array in
    practice, despite the list annotation kept for interface compatibility).
    """

    weights: list[float] = field(default_factory=list)  # idiomatic: list, not lambda: []
    bias: float = 0.0
    N: float = 0.0  # cached pre-activation of the last compute(): w·x + b
    M: float = 0.0  # cached activation of the last compute(): sigmoid(N)

    def compute(self, inputs):
        """Return sigmoid(weights · inputs + bias), caching N and M on self."""
        self.N = np.dot(self.weights, inputs) + self.bias
        self.M = sigmoid(self.N)
        return self.M

@dataclass
class MyNeuronNetwork:
    """A minimal 4-3-1 feed-forward network with sigmoid activations.

    Three hidden neurons each read the 4 iris features; one output neuron
    reads the 3 hidden activations and yields a value in (0, 1) interpreted
    as the probability of class 1 (binary classification).
    """

    HL1: Neuron = field(init=False)  # hidden neuron 1
    HL2: Neuron = field(init=False)  # hidden neuron 2
    HL3: Neuron = field(init=False)  # hidden neuron 3
    O1: Neuron = field(init=False)   # output neuron

    def __post_init__(self):
        """Randomly initialise every neuron (Dirichlet weights, normal bias)."""
        # Hidden neurons take the 4 raw features; the output neuron takes
        # the 3 hidden activations. Dirichlet samples are >= 0 and sum to 1.
        for name, n_inputs in (("HL1", 4), ("HL2", 4), ("HL3", 4), ("O1", 3)):
            neuron = Neuron()
            neuron.weights = np.random.dirichlet(np.ones(n_inputs))
            neuron.bias = np.random.normal()
            setattr(self, name, neuron)

    def compute(self, inputs):
        """Forward pass: return the output activation for one 4-feature sample."""
        m1 = self.HL1.compute(inputs)
        m2 = self.HL2.compute(inputs)
        m3 = self.HL3.compute(inputs)
        return self.O1.compute([m1, m2, m3])

    def train(self, data: pd.DataFrame, target: pd.Series, learning_rate=0.1, epochs=100):
        """Train with per-sample (stochastic) gradient descent on squared error.

        Args:
            data: one 4-feature sample per row.
            target: binary labels aligned with ``data``'s index.
            learning_rate: step size applied to every weight/bias update.
            epochs: number of full passes over ``data``.
        """
        for epoch in range(epochs):
            total_error = 0
            # iterrows() yields (index label, row-as-Series) pairs.
            for idx, row in data.iterrows():
                # FIX: ``row[i]`` on a string-labelled Series is label-based
                # lookup (and deprecated for ints) — convert to ndarray once
                # so all feature access below is positional.
                x = row.to_numpy()

                # Forward pass through the hidden layer and the output neuron.
                m1 = self.HL1.compute(x)
                m2 = self.HL2.compute(x)
                m3 = self.HL3.compute(x)
                prediction = self.O1.compute([m1, m2, m3])

                # Accumulate squared error for the progress report below.
                error = target[idx] - prediction
                total_error += error ** 2

                # Backward pass. Hidden deltas must use the *pre-update*
                # output weights, so compute all deltas before any update.
                delta_output = error * sigmoid_derivative(prediction)
                delta_hl1 = delta_output * self.O1.weights[0] * sigmoid_derivative(m1)
                delta_hl2 = delta_output * self.O1.weights[1] * sigmoid_derivative(m2)
                delta_hl3 = delta_output * self.O1.weights[2] * sigmoid_derivative(m3)

                # Update the output neuron.
                self.O1.weights[0] += learning_rate * delta_output * m1
                self.O1.weights[1] += learning_rate * delta_output * m2
                self.O1.weights[2] += learning_rate * delta_output * m3
                self.O1.bias += learning_rate * delta_output

                # Update the hidden neurons (each connects to all 4 inputs).
                for i in range(len(x)):
                    self.HL1.weights[i] += learning_rate * delta_hl1 * x[i]
                    self.HL2.weights[i] += learning_rate * delta_hl2 * x[i]
                    self.HL3.weights[i] += learning_rate * delta_hl3 * x[i]

                self.HL1.bias += learning_rate * delta_hl1
                self.HL2.bias += learning_rate * delta_hl2
                self.HL3.bias += learning_rate * delta_hl3

            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch + 1}, Total Error: {total_error}")

    def predict(self, data: pd.DataFrame):
        """Return hard 0/1 predictions (rounded sigmoid output), one per row."""
        # Assumes a binary target, so the continuous output rounds to 0 or 1.
        return [round(self.compute(row)) for row in data.values]

def main():
    """Train the toy network on a binary iris subset and print predictions."""
    # load_iris with return_X_y gives a (features, labels) pair; as_frame
    # makes them a pandas DataFrame / Series.
    ds = load_iris(as_frame=True, return_X_y=True)

    # The first 100 rows cover iris classes 0 and 1 only -> binary problem.
    features = ds[0].iloc[:100, :]
    labels = ds[1][:100]

    # Hold out 20% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=42
    )

    # Build and fit the network on the training split.
    network = MyNeuronNetwork()
    network.train(X_train, y_train)

    # Predict the held-out samples and show prediction vs. ground truth.
    comparison = pd.DataFrame()
    comparison["预测值"] = network.predict(X_test)
    comparison["实际值"] = y_test.values
    print(comparison)

if __name__ == "__main__":
    main()


举报

相关推荐

0 条评论