
Python, Theano - ValueError: Input dimension mismatch

I have built a DNN in Theano, based on the mnist.py example from Lasagne. As a first step, I am trying to train a neural network with a single hidden layer, defined as:

def build_first_auto(input_var=None):

    l_input = lasagne.layers.InputLayer(shape=(None, 1, 48, 1), input_var=input_var)
    l_hidden1 = lasagne.layers.DenseLayer(l_input, num_units=256,
                                          nonlinearity=lasagne.nonlinearities.sigmoid,
                                          W=lasagne.init.GlorotUniform())

    return l_hidden1

which is used inside the following training script:

from load_dataset import load_dataset 
from build_DNNs import build_first_auto 

import sys 
import os 
import time 

import numpy as np 
from numpy import linalg as LA 
import theano 
import theano.tensor as T 

import lasagne 
import scipy.io as sio 

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):

    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]


def train_autoencoder(num_epochs):

    Xtrain, ytrain = load_dataset()

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.matrix('targets')

    # Create neural network model
    network = build_first_auto(input_var)

    prediction = lasagne.layers.get_output(network)
    params = lasagne.layers.get_all_params(network, trainable=True)

    loss = lasagne.objectives.binary_crossentropy(prediction, target_var)
    loss = loss.mean()

    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
    np.save('params', params)

    # Monitoring the training
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
    test_loss = test_loss.mean()

    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var), dtype=theano.config.floatX)

    # Compile
    train_fn = theano.function([input_var, target_var], loss, updates=updates, on_unused_input='ignore')

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Training
    print("Starting training...")
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(Xtrain, ytrain, 30821, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(Xtrain, ytrain, 30821, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))

The loss function used is binary cross-entropy. The problem is that I am getting an error about the dimensions of the arrays:

ValueError: Input dimension mis-match. (input[1].shape[1] = 1, input[3].shape[1] = 256)

Apply node that caused the error: Elemwise{Composite{(((i0 * i1 * (i2 - scalar_sigmoid(i3)))/i4) - ((i0 * i5 * scalar_sigmoid(i3))/i4))}}(TensorConstant{(1, 1) of -1.0}, targets, TensorConstant{(1, 1) of 1.0}, Elemwise{Add}[(0, 0)].0, Elemwise{mul,no_inplace}.0, Elemwise{sub,no_inplace}.0)

Toposort index: 17

Inputs types: [TensorType(float64, (True, True)), TensorType(float64, matrix), TensorType(float64, (True, True)), TensorType(float64, matrix), TensorType(float64, (True, True)), TensorType(float64, matrix)]

Inputs shapes: [(1, 1), (30821, 1), (1, 1), (30821, 256), (1, 1), (30821, 1)]

Inputs strides: [(8, 8), (8, 8), (8, 8), (2048, 8), (8, 8), (8, 8)]

Inputs values: [array([[-1.]]), 'not shown', array([[ 1.]]), 'not shown', array([[ 30821.]]), 'not shown']

Outputs clients: [[Dot22Scalar(InplaceDimShuffle{1,0}.0, Elemwise{Composite{(((i0 * i1 * (i2 - scalar_sigmoid(i3)))/i4) - ((i0 * i5 * scalar_sigmoid(i3))/i4))}}.0, TensorConstant{0.01}), Sum{axis=[0], acc_dtype=float64}(Elemwise{Composite{(((i0 * i1 * (i2 - scalar_sigmoid(i3)))/i4) - ((i0 * i5 * scalar_sigmoid(i3))/i4))}}.0)]]

As a hint, I can say that the dimensions of the input are (30821, 1, 48, 1) and of the targets (30821, 1). I have read several posts on how to solve this error by reshaping, but that does not apply in my case. Defining target_var = T.matrix() instead of T.ivector() did not help either. Setting the hidden layer's size to match the targets does work, but the functionality of this neural network should be independent of that number. Thanks for your help.
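(A quick way to see where the shapes diverge, as a minimal diagnostic sketch not part of the original post: walk the layer graph with Lasagne's get_all_layers and print each layer's symbolic output shape.)

import lasagne
from build_DNNs import build_first_auto

network = build_first_auto()
# Print every layer's symbolic output shape.
for layer in lasagne.layers.get_all_layers(network):
    print(type(layer).__name__, lasagne.layers.get_output_shape(layer))
# InputLayer (None, 1, 48, 1)
# DenseLayer (None, 256)   <- 256 columns, but the targets have shape (30821, 1)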

Answer


For your network, the output is 256-dimensional. Since you are using the binary cross-entropy loss function, I assume you want to classify the samples into two classes. You need an output layer with num_units=2 and a softmax nonlinearity:

def build_first_auto(input_var=None):
    l_input = lasagne.layers.InputLayer(shape=(None, 1, 48, 1), input_var=input_var)
    l_hidden1 = lasagne.layers.DenseLayer(l_input, num_units=256,
                                          nonlinearity=lasagne.nonlinearities.sigmoid,
                                          W=lasagne.init.GlorotUniform())
    l_output = lasagne.layers.DenseLayer(l_hidden1, num_units=2,
                                         nonlinearity=lasagne.nonlinearities.softmax,
                                         W=lasagne.init.GlorotUniform())

    return l_output

This should work. Let me know if there are any problems.
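(A note on the loss: lasagne.objectives.binary_crossentropy compares predictions and targets elementwise, so with the two-unit softmax output above the targets would need to be one-hot encoded to shape (batch, 2), or the loss switched to categorical_crossentropy with a T.ivector of class indices. An alternative that keeps the original binary_crossentropy loss and the (30821, 1) targets as they are is a single sigmoid output unit. A minimal sketch, with the hypothetical name build_first_auto_binary; it is not from the answer above.)

import lasagne

def build_first_auto_binary(input_var=None):
    # Same input and hidden layer as the original network.
    l_input = lasagne.layers.InputLayer(shape=(None, 1, 48, 1), input_var=input_var)
    l_hidden1 = lasagne.layers.DenseLayer(l_input, num_units=256,
                                          nonlinearity=lasagne.nonlinearities.sigmoid,
                                          W=lasagne.init.GlorotUniform())
    # One sigmoid unit: predictions of shape (batch, 1), matching the
    # (30821, 1) targets elementwise, as binary_crossentropy expects.
    l_output = lasagne.layers.DenseLayer(l_hidden1, num_units=1,
                                         nonlinearity=lasagne.nonlinearities.sigmoid)
    return l_output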