Calculating precision and recall in TensorFlow

I am running a TF NN model that trains smoothly (the code can be found at https://pythonprogramming.net/). I would like to add a few lines that count true and false positives/negatives and compute precision and recall. I have tried many sum functions, but the objects in Python are not familiar to me. I can't run sklearn because I want to work with TF, which constrains the Python version I can use. Thanks for any help.

import pandas as pd 
import tensorflow as tf 
import numpy as np 
import random 
from random import shuffle 

train_x = pd.read_csv('train_x.csv') 
train_y = pd.read_csv('train_y.csv') 
test_x = pd.read_csv('test_x.csv') 
test_y = pd.read_csv('test_y.csv') 

n_nodes_hl1 = 30 
n_nodes_hl2 = 30 
n_nodes_hl3 = 30 

n_classes = 2 
batch_size = 2000 

x = tf.placeholder('float', [None, 61])  # 61 input features per example
y = tf.placeholder('float')              # one-hot labels, shape [None, n_classes]

def neural_network_model(data): 
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([61, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}

    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}

    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}

    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}


    l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases']) 
    l1 = tf.nn.relu(l1) 

    l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']), hidden_2_layer['biases']) 
    l2 = tf.nn.relu(l2) 

    l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['biases']) 
    l3 = tf.nn.relu(l3) 

    output = tf.matmul(l3,output_layer['weights']) + output_layer['biases'] 

    return output 

def train_neural_network(x):
    prediction = neural_network_model(x)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0

            # iterate over the training set in mini-batches
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size

                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])

                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))

        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))

train_neural_network(x)

I tried the following:

argmax_prediction = tf.argmax(prediction, 1)
argmax_y = tf.argmax(y, 1)

# with binary 0/1 labels, each product is non-zero only for the
# corresponding outcome, so count_nonzero tallies that case
TP = tf.count_nonzero(argmax_prediction * argmax_y, dtype=tf.float32)
TN = tf.count_nonzero((argmax_prediction - 1) * (argmax_y - 1), dtype=tf.float32)
FP = tf.count_nonzero(argmax_prediction * (argmax_y - 1), dtype=tf.float32)
FN = tf.count_nonzero((argmax_prediction - 1) * argmax_y, dtype=tf.float32)

precision = TP / (TP + FP)
recall = TP / (TP + FN)

print("Precision", precision)
print("Recall", recall)

And I get:

Precision Tensor("truediv:0", dtype=float32) 
Recall Tensor("truediv_1:0", dtype=float32) 

Answer


Since you have constructed precision and recall as tensors, you need to run them in a TensorFlow session to fetch their values.

1. How did you get the prediction?

    prediction = some_function(x)
    # x is your input placeholder for prediction
    # y is the input placeholder for ground-truths
    sess = tf.Session()
    precision_, recall_ = sess.run([precision, recall], feed_dict={x: input, y: ground_truths})
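For completeness, here is a minimal sketch (not from the original thread) of how the TP/TN/FP/FN tensors from the question could be fetched inside the existing with tf.Session() as sess: block of train_neural_network, right after the accuracy print. It assumes, as the question's code does, one-hot binary labels with class 1 as the positive class:

    # inside the session, after the accuracy print
    argmax_prediction = tf.argmax(prediction, 1)
    argmax_y = tf.argmax(y, 1)

    TP = tf.count_nonzero(argmax_prediction * argmax_y, dtype=tf.float32)
    TN = tf.count_nonzero((argmax_prediction - 1) * (argmax_y - 1), dtype=tf.float32)
    FP = tf.count_nonzero(argmax_prediction * (argmax_y - 1), dtype=tf.float32)
    FN = tf.count_nonzero((argmax_prediction - 1) * argmax_y, dtype=tf.float32)

    precision = TP / (TP + FP)
    recall = TP / (TP + FN)

    # .eval() uses the enclosing default session, like the accuracy line above
    print('Precision:', precision.eval({x: test_x, y: test_y}))
    print('Recall:', recall.eval({x: test_x, y: test_y}))

As an alternative, TF 1.x also ships tf.metrics.precision and tf.metrics.recall; each returns a (value, update_op) pair and requires sess.run(tf.local_variables_initializer()) before use.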
    

Thanks Ishant. – symphony