2016-11-13 184 views
0

我真的使用tensorflow神經網絡的簡單的例子:InvalidArgumentError在Tensorflow神經網絡

# define the numbers of nodes in hidden layers 
nodesLayer1 = 500 
nodesLayer2 = 500 
nodesLayer3 = 500 

# define our goal class 
classes = 2 
batchSize = 500 

# x for input, y for output 
sizeOfRow = len(data[0]) 
x = tensorFlow.placeholder(dtype= "float", shape=[None, sizeOfRow]) 
y = tensorFlow.placeholder(dtype= "float") 


def neuralNetworkModel(data): 
    # first step: (input * weights) + bias, linear operation like y = ax + b 
    # each layer connection to other layer will represent by nodes(i) * nodes(i+1) 
    hiddenLayer1 = {"weights" : tensorFlow.Variable(tensorFlow.random_normal([sizeOfRow, nodesLayer1])), 
        "biases" : tensorFlow.Variable(tensorFlow.random_normal([nodesLayer1]))} 

    hiddenLayer2 = {"weights" : tensorFlow.Variable(tensorFlow.random_normal([nodesLayer1, nodesLayer2])), 
        "biases" : tensorFlow.Variable(tensorFlow.random_normal([nodesLayer2]))} 

    hiddenLayer3 = {"weights": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer2, nodesLayer3])), 
        "biases": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer3]))} 

    outputLayer = {"weights": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer3, classes])), 
        "biases": tensorFlow.Variable(tensorFlow.random_normal([classes]))} 

    # create the layers 
    layer1 = tensorFlow.add(tensorFlow.matmul(data, hiddenLayer1["weights"]), hiddenLayer1["biases"]) 
    layer1 = tensorFlow.nn.relu(layer1) # pass values to activation function (i.e sigmoid, softmax) and add it to the layer 

    layer2 = tensorFlow.add(tensorFlow.matmul(layer1, hiddenLayer2["weights"]), hiddenLayer2["biases"]) 
    layer2 = tensorFlow.nn.relu(layer2) 

    layer3 = tensorFlow.add(tensorFlow.matmul(layer2, hiddenLayer3["weights"]), hiddenLayer3["biases"]) 
    layer3 = tensorFlow.nn.relu(layer3) 

    output = tensorFlow.matmul(layer3, outputLayer["weights"]) + outputLayer["biases"] 

    return output 


def neuralNetworkTrain(x): 
    prediction = neuralNetworkModel(x) 
    # using softmax function, normalize values to range(0,1) 
    cost = tensorFlow.reduce_mean(tensorFlow.nn.softmax_cross_entropy_with_logits(prediction, y)) 

    # minimize the cost function 
    # at the same way we can use Gradiant Decent 
    # default learning rate is 0.001 in AdamOptimizer 
    optimizer = tensorFlow.train.AdamOptimizer(0.0001).minimize(cost) 
    epochs = 15 

    # build sessions and train the model 
    with tensorFlow.Session() as sess: 
    sess.run(tensorFlow.initialize_all_variables()) 

    for epoch in range(epochs): 
     epochLoss = 0 
     i = 0 
     for temp in range(int(len(data)/batchSize)): 
     ex, ey = nextBatch(batchSize, i) # takes 500 examples 
     i += 1 
     # TO-DO : fix bug here 
     temp, cos = sess.run((optimizer, cost)) # start session to optimize the cost function 
     epochLoss += cos 
     print("Epoch", epoch, "completed out of", epochs, "loss:", epochLoss) 

    correct = tensorFlow.equal(tensorFlow.argmax(prediction,1), tensorFlow.argmax(y, 1)) 
    accuracy = tensorFlow.reduce_mean(tensorFlow.cast(correct, "float")) 
    print("Accuracy:", accuracy.eval()) 

和我有此錯誤消息:

Caused by op u'Placeholder_1', defined at: 
    File "/home/or/PycharmProjects/untitled/NeuralNetwork.py", line 39, in <module> 
    y = tensorFlow.placeholder(dtype= "float") 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 1332, in placeholder 
    name=name) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 1748, in _placeholder 
    name=name) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 749, in apply_op 
    op_def=op_def) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2380, in create_op 
    original_op=self._default_original_op, op_def=op_def) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1298, in __init__ 
    self._traceback = _extract_stack() 

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_1' with dtype float 
    [[Node: Placeholder_1 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]] 

之前有人看到這一點,可以解釋請問如何解決? 我試過很多東西,閱讀了大量的tensorflow網站,但不能老是找到確切答案..

回答

0

指定佔位符數據類型時,你需要明確的一點信息...嘗試x = tf.placeholder(dtype=tf.float32)(通常使用脂肪酶float32的速度和內存目的)。這裏是鏈接到documentation for placeholder

當運行會話,則需要使用匹配的數據類型養活真實數據爲所有的佔位符。 更改neuralNetworkTrain功能如下:

def neuralNetworkTrain(x): 
    prediction = neuralNetworkModel(x) 
    # ... omitted code... 
    # build sessions and train the model 
    with tensorFlow.Session() as sess: 
     sess.run(tensorFlow.initialize_all_variables()) 
     for epoch in range(epochs): 
      epochLoss = 0 
      i = 0 
      for temp in range(int(len(data)/batchSize)): 
       ex, ey = nextBatch(batchSize, i) # takes 500 examples 
       i += 1 
       # TO-DO : fix bug here 
       # **Add feed_dict for placeholder x** 
       feed_dict = {x: ex} 
       temp, cos = sess.run((optimizer, cost), feed_dict=feed_dict) # start session to optimize the cost function 
       epochLoss += cos 
      print("Epoch", epoch, "completed out of", epochs, "loss:", epochLoss) 

      correct = tensorFlow.equal(tensorFlow.argmax(prediction,1), tensorFlow.argmax(y, 1)) 
      accuracy = tensorFlow.reduce_mean(tensorFlow.cast(correct, "float")) 
      # **Add feed_dict for placeholder y** 
      print("Accuracy:", accuracy.eval(feed_dict={y: ey})) 
+0

還是同樣的錯誤.. –

+0

對不起。我只注意到你沒有餵你的佔位符feed_dict(我的壞起初沒注意)..我更新了我的答案。讓我知道它是否修復了錯誤 –