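The snippet below calls two helpers, relu and R2, without defining them. Here is a minimal sketch of what they presumably look like, inferred from the call sites (relu(x) for the activation, relu(x, True) for its derivative, and R2(y, l2) for the coefficient of determination); treat the exact signatures as assumptions:

import numpy as np

def relu(x, deriv=False):
    # ReLU activation; with deriv=True, return its derivative instead
    # (assumed signature, inferred from the calls relu(...) and relu(..., True))
    if deriv:
        return (x > 0).astype(float)
    return np.maximum(0, x)

def R2(y_true, y_pred):
    # coefficient of determination (R^2), used as the progress metric
    # (assumed implementation; the original only shows the call R2(y, l2))
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1.0 - ss_res / ss_tot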
def MIN_NN(dataset, n_iter=1000, step=0.1):
    # Note: the dataset argument is accepted but unused; X is generated internally.
    np.random.seed(1)
    # input dataset: 5 samples, each a pair (A, B) drawn from [0, 1)
    X = np.random.random([5, 2])
    # output dataset: the row-wise minimum min(A, B)
    y = np.min(X, axis=1).reshape([-1, 1])
    # initialize weights randomly with mean 0
    syn00 = 2 * np.random.random([2, 1]) - 1
    syn01 = 2 * np.random.random([2, 1]) - 1
    syn02 = 2 * np.random.random([2, 1]) - 1
    syn10 = 2 * np.random.random([3, 1]) - 1
    for it in range(n_iter):
        # accumulate the weight corrections over the whole dataset
        correc00 = np.zeros(syn00.shape)
        correc01 = np.zeros(syn01.shape)
        correc02 = np.zeros(syn02.shape)
        correc10 = np.zeros(syn10.shape)
        for l0, yi in zip(X, y):
            l0 = l0.reshape([1, -1])
            # forward propagation: the three hidden units hold the terms of
            # min(A, B) = (A + B)/2 - |A - B|/2
            aeb = relu(l0.dot(syn00))  # aeb -> A + B
            amb = relu(l0.dot(syn01))  # amb -> A - B
            bma = relu(l0.dot(syn02))  # bma -> B - A
            l1 = np.concatenate([aeb, amb, bma], axis=1)
            l2 = relu(l1.dot(syn10))
            # how much did we miss?
            l2_error = yi - l2
            # scale the miss by the slope of the ReLU at l2
            l2_delta = l2_error * relu(l2, True)
            # backpropagate through syn10 to get the hidden-layer deltas
            l1_delta = l2_delta.dot(syn10.T) * relu(l1, True)
            correc00 += l0.T.dot(l1_delta[:, 0:1])
            correc01 += l0.T.dot(l1_delta[:, 1:2])
            correc02 += l0.T.dot(l1_delta[:, 2:3])
            correc10 += l1.T.dot(l2_delta)
        # update the weights, scaled by the learning rate
        syn00 += step * correc00
        syn01 += step * correc01
        syn02 += step * correc02
        syn10 += step * correc10
        if it % 100 == 0:
            # progress report: R^2 score on the full dataset
            aeb = relu(X.dot(syn00))
            amb = relu(X.dot(syn01))
            bma = relu(X.dot(syn02))
            l1 = np.concatenate([aeb, amb, bma], axis=1)
            l2 = relu(l1.dot(syn10))
            print("Iteration", it, "- R2:", R2(y, l2))
    # final forward pass on the full dataset
    aeb = relu(X.dot(syn00))
    amb = relu(X.dot(syn01))
    bma = relu(X.dot(syn02))
    l1 = np.concatenate([aeb, amb, bma], axis=1)
    l2 = relu(l1.dot(syn10))
    print("Output after training:")
    print(l2)
    return l2
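A note on the design: this architecture can represent the minimum exactly, because |A - B| = relu(A - B) + relu(B - A), so min(A, B) = (A + B)/2 - |A - B|/2 is reachable with hidden weights close to [1, 1], [1, -1], [-1, 1] and output weights [0.5, -0.5, -0.5]. To exercise the function (a sketch assuming the helper definitions above; since the dataset argument is unused, any value works):

preds = MIN_NN(None, n_iter=1000, step=0.1)  # predictions for the 5 internally generated (A, B) pairs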