# Neural network algorithm implementation

import numpy as npy
def tanh(x):
    """Hyperbolic tangent activation function."""
    result = npy.tanh(x)
    return result
def tanh_deriv(x):
    """Derivative of tanh: 1 - tanh(x)^2."""
    t = npy.tanh(x)
    return 1.0 - t * t
def logistic(x):
    """Logistic (sigmoid) activation function: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + npy.exp(-x))
def logistic_deriv(x):
    """Derivative of the logistic function: s(x) * (1 - s(x))."""
    # Evaluate the sigmoid once instead of twice (the original called
    # logistic(x) two times, doubling the exp() work for identical results).
    s = logistic(x)
    return s * (1 - s)
class NeuralNetwork:
    """A simple fully-connected feed-forward network trained by stochastic
    backpropagation.

    ``layers`` is a list of layer sizes, e.g. [10, 10, 3] means 10 neurons in
    the first (input) layer, 10 in the hidden layer and 3 in the output layer.
    NOTE(review): the weight-init scheme below appends two matrices per hidden
    layer, so it is only well-formed for exactly one hidden layer (3 entries
    in ``layers``) — confirm before using deeper topologies.
    """

    def __init__(self, layers, activation="tanh"):
        """Build the network.

        layers     -- list of layer sizes (see class docstring).
        activation -- "tanh" (default) or "logistic".
        """
        # Pick the activation function and its derivative.
        if activation == "logistic":
            self.activation = logistic
            self.activation_deriv = logistic_deriv
        elif activation == "tanh":
            self.activation = tanh
            self.activation_deriv = tanh_deriv
        self.weight = []
        # Initialize weights in [-0.25, 0.25). The "+1" adds a bias unit to
        # every non-output layer. BUG FIX: the first append below had been
        # swallowed into a comment during extraction, leaving the input->hidden
        # weight matrix uncreated; it is restored here.
        for i in range(1, len(layers) - 1):
            self.weight.append(
                (2 * npy.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            self.weight.append(
                (2 * npy.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)

    def fit(self, X, y, learning_rete=0.2, epochs=10000):
        """Train the network with stochastic gradient descent.

        X             -- matrix, one training example per row.
        y             -- target value for each example.
        learning_rete -- learning rate (misspelled name kept for
                         backward compatibility with existing callers).
        epochs        -- number of single-example update steps.
        """
        X = npy.atleast_2d(X)
        # Append a constant bias column of ones to every example.
        temp = npy.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = npy.array(y)
        for k in range(epochs):
            # Stochastic step: pick one training example at random.
            i = npy.random.randint(X.shape[0])
            a = [X[i]]
            # Forward pass, keeping every layer's activation for backprop.
            for l in range(len(self.weight)):
                a.append(self.activation(npy.dot(a[l], self.weight[l])))
            error = y[i] - a[-1]
            # BUG FIX: the output delta is error *times* the derivative
            # (the original used subtraction).
            deltas = [error * self.activation_deriv(a[-1])]
            # Backward pass: propagate the error from the last hidden layer
            # toward the input. BUG FIX: chain from deltas[-1] (the most
            # recently computed delta), not deltas[-l].
            for l in range(len(a) - 2, 0, -1):
                deltas.append(
                    deltas[-1].dot(self.weight[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            # Gradient step. BUG FIX: this update belongs inside the epoch
            # loop; the original had it dedented, so the weights would have
            # been updated only once after all sampling finished.
            for j in range(len(self.weight)):
                layer = npy.atleast_2d(a[j])
                delta = npy.atleast_2d(deltas[j])
                self.weight[j] += learning_rete * layer.T.dot(delta)

    def predict(self, x):
        """Run a forward pass for a single input vector and return the
        network's output array."""
        x = npy.array(x)
        # Append the bias unit to the input, matching fit()'s layout.
        temp = npy.ones(x.shape[0] + 1)
        temp[0:-1] = x
        a = temp
        for l in range(0, len(self.weight)):
            a = self.activation(npy.dot(a, self.weight[l]))
        return a
# Demo: learn the XNOR-style truth table (output 1 when inputs are equal).
# BUG FIX: guarded with __main__ so importing this module no longer trains
# a network and prints as a side effect.
if __name__ == "__main__":
    x = npy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = npy.array([1, 0, 0, 1])
    nn = NeuralNetwork([2, 2, 1], "tanh")
    nn.fit(x, y)
    for i in x:
        print(i, nn.predict(i))
# Original author: 神经网络算法
# Original article: https://blog.csdn.net/weixin_41789633/article/details/79714754
# Reposted from a web article to share knowledge; contact the blog author
# for removal in case of infringement.