Dummy Neural Network in Python

I just wrote a simple neural network in Python, since I've been studying them recently. It uses backpropagation, and the activation function is a sigmoid. The inputs and weights are generated randomly, and the ideal output is 0. I'm new to Python, so the code is written very inefficiently, but it should be readable. When I run the code, the output is always zero, and I can't figure out why. Also, I'm not using any modules.

from random import uniform 
input_one_input_value = 1 
input_two_input_value = 0 
bias_value = 1 

#use global to globalize function-based variables 
#use lists to store data in future 
def hidden_One(): 
    global weighted_sum_hidden_one 
    weighted_sum_hidden_one = (input_one_input_value * weights[0]) + (input_two_input_value * weights[1]) + (bias_value * weights[2]) 
    hidden_one_output = activation(weighted_sum_hidden_one) 
    return hidden_one_output 

def hidden_Two(): 
    global weighted_sum_hidden_two 
    weighted_sum_hidden_two = (input_one_input_value * weights[3]) + (input_two_input_value * weights[4]) + (bias_value * weights[5]) 
    hidden_two_output = activation(weighted_sum_hidden_two) 
    return hidden_two_output 

def output_One(): 
    weighted_sum = (hidden_One() * weights[6]) + (hidden_Two() * weights[7]) + (bias_value * weights[8]) 
    return activation(weighted_sum) 

def activation(x): 
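    # note: as written this computes 1/(1 + e**x); the conventional logistic sigmoid uses e**-x 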
    sigmoid_value = 1/(1+(2.71828 ** x)) 
    return sigmoid_value 

def calculate_gradient(): 
    E = ideal_output - actual_output 
    output_delta = -E * activation(weights[6] + weights[7] + weights[8]) 
    h1_delta = activation(weighted_sum_hidden_one) * weights[6] * output_delta 
    h2_delta = activation(weighted_sum_hidden_two) * weights[7] * output_delta 
    b2_delta = activation(bias_value) * weights[8] * output_delta 
    i1_delta = activation(input_one_input_value) * ((weights[0] * h1_delta) + (weights[3] * h2_delta)) 
    i2_delta = activation(input_one_input_value) * ((weights[1] * h1_delta) + (weights[4] * h2_delta)) 
    b1_delta = activation(bias_value) * ((weights[2] * h1_delta) + (weights[5] * h2_delta)) 
    global w1_gradient 
    global w2_gradient 
    global w3_gradient 
    global w4_gradient 
    global w5_gradient 
    global w6_gradient 
    global w7_gradient 
    global w8_gradient 
    global w9_gradient 
    w1_gradient = input_one_input_value * h1_delta 
    w2_gradient = input_two_input_value * h1_delta 
    w3_gradient = bias_value * h1_delta 
    w4_gradient = input_one_input_value * h2_delta 
    w5_gradient = input_two_input_value * h2_delta 
    w6_gradient = bias_value * h2_delta 
    w7_gradient = hidden_One() * output_delta 
    w8_gradient = hidden_Two() * output_delta 
    w9_gradient = bias_value * output_delta 


def backpropogation(): 
    E = .7 #learning rate 
    a = .3 #momentum to prevent settling for local minima 
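    # weight update with momentum: change(t) = E * gradient + a * change(t-1) 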
    global weightchanges_previous 
    global weight_change 
    weightchanges_previous = [] 
    weight_change = [] 
    if len(weightchanges_previous) == 0: 
     weight_change.append((E * w1_gradient)) 
     weight_change.append((E * w2_gradient)) 
     weight_change.append((E * w3_gradient)) 
     weight_change.append((E * w4_gradient)) 
     weight_change.append((E * w5_gradient)) 
     weight_change.append((E * w6_gradient)) 
     weight_change.append((E * w7_gradient)) 
     weight_change.append((E * w8_gradient)) 
     weight_change.append((E * w9_gradient)) 
     weightchanges_previous.append(weight_change[0]) 
     weightchanges_previous.append(weight_change[1]) 
     weightchanges_previous.append(weight_change[2]) 
     weightchanges_previous.append(weight_change[3]) 
     weightchanges_previous.append(weight_change[4]) 
     weightchanges_previous.append(weight_change[5]) 
     weightchanges_previous.append(weight_change[6]) 
     weightchanges_previous.append(weight_change[7]) 
     weightchanges_previous.append(weight_change[8]) 

    elif len(weightchanges_previous) != 0: 
     weight_change[0] = (E * w1_gradient) + (a * weightchanges_previous[0]) 
     weight_change[1] = (E * w2_gradient) + (a * weightchanges_previous[1]) 
     weight_change[2] = (E * w3_gradient) + (a * weightchanges_previous[2]) 
     weight_change[3] = (E * w4_gradient) + (a * weightchanges_previous[3]) 
     weight_change[4] = (E * w5_gradient) + (a * weightchanges_previous[4]) 
     weight_change[5] = (E * w6_gradient) + (a * weightchanges_previous[5]) 
     weight_change[6] = (E * w7_gradient) + (a * weightchanges_previous[6]) 
     weight_change[7] = (E * w8_gradient) + (a * weightchanges_previous[7]) 
     weight_change[8] = (E * w9_gradient) + (a * weightchanges_previous[8]) 
     while len(weightchanges_previous) > 0 : weightchanges_previous.pop() 
     weightchanges_previous.append((E * w1_gradient) + (a * weightchanges_previous[0])) 
     weightchanges_previous.append((E * w2_gradient) + (a * weightchanges_previous[1])) 
     weightchanges_previous.append((E * w3_gradient) + (a * weightchanges_previous[2])) 
     weightchanges_previous.append((E * w4_gradient) + (a * weightchanges_previous[3])) 
     weightchanges_previous.append((E * w5_gradient) + (a * weightchanges_previous[4])) 
     weightchanges_previous.append((E * w6_gradient) + (a * weightchanges_previous[5])) 
     weightchanges_previous.append((E * w7_gradient) + (a * weightchanges_previous[6])) 
     weightchanges_previous.append((E * w8_gradient) + (a * weightchanges_previous[7])) 
     weightchanges_previous.append((E * w9_gradient) + (a * weightchanges_previous[8])) 

def edit_weights(): 
    weights[0] += weight_change[0] 
    weights[1] += weight_change[1] 
    weights[2] += weight_change[2] 
    weights[3] += weight_change[3] 
    weights[4] += weight_change[4] 
    weights[5] += weight_change[5] 
    weights[6] += weight_change[6] 
    weights[7] += weight_change[7] 
    weights[8] += weight_change[8] 
    while len(weight_change) > 0 : weight_change.pop() 

weights = [] 
x = 0 
while x <=8: 
    weights.append(uniform(-10, 10)) 
    x += 1 

ideal_output = 0 
actual_output = output_One() 
print "Output %d" % output_One() 

x = 0 
while x <= 10: 
    output_One() 
    calculate_gradient() 
    backpropogation() 
    edit_weights() 
    print "Output %d" % output_One() 
    print "----------------------" 
    actual_output = output_One() 
    x += 1 

print "FINAL WEIGHTS:" 
print weights[0] 
print weights[1] 
print weights[2] 
print weights[3] 
print weights[4] 
print weights[5] 
print weights[6] 
print weights[7] 
print weights[8] 

Answer

Your problem is the output line:

print "Output %d" % output_One() 

You are using %d, which converts the floating-point value to an integer (truncating it), so it always prints an integral value. Since the sigmoid output is strictly between 0 and 1, it prints as 0 every time. Use %f instead and you should get the floats printed correctly.
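For illustration, here is a quick sketch with a made-up value (the actual output depends on the random weights):

value = 0.376512    # e.g. a sigmoid output, always strictly between 0 and 1 
print "Output %d" % value  # integer conversion truncates: prints "Output 0" 
print "Output %f" % value  # prints "Output 0.376512" 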

@user2127869 Does this answer your question? If so, please mark it as answered so it doesn't stay open. – nemo 2013-03-03 16:22:40

Yes, thank you! I figured it out a few hours ago. – mackintosh18 2013-03-03 17:02:19

@user2127869: Great! The next step is to accept the answer. How to do that is described in http://stackoverflow.com/faq#howtoask. In short: click the checkmark below the reputation counter of the answer you want to accept. – nemo 2013-03-03 17:42:10