Dplll, 2019-02-07 03:03:00
Python

How to fix a ValueError in TensorFlow?

import tensorflow as tf
import numpy as np

from bpemb import BPEmb
from tensorflow.contrib import rnn

batch_sz = 64
vocab_sz = 1000
embedding_dim = 100

units = 50

bpemb_en = BPEmb(lang='en', vs=vocab_sz, dim=embedding_dim)

max_length_inp = 10
max_length_target = 30
number_of_layers = 2

x = tf.placeholder('int32', shape=[None, max_length_inp])
y = tf.placeholder('float', shape=[None, max_length_target])

# Frozen pretrained BPE embeddings.
embedding_weights = tf.Variable(bpemb_en.vectors, trainable=False)
embed = tf.nn.embedding_lookup(embedding_weights, x)


def gru_cell():
    return rnn.GRUCell(units)


def encoder(embed, batch_sz):
    stacked_gru = rnn.MultiRNNCell(
        [gru_cell() for _ in range(number_of_layers)])

    outputs = []
    state = stacked_gru.zero_state(batch_sz, tf.float32)

    # Unroll the stacked GRU over the input time steps by hand.
    for i in range(max_length_inp):
        output, state = stacked_gru(embed[:, i], state)
        outputs.append(output)

    outputs = tf.stack(outputs)
    outputs = tf.transpose(outputs, [1, 0, 2])  # (batch, time, units)

    return outputs, state[-1]


outputs, state = encoder(embed, batch_sz)


def attention(outputs, hidden):
    hidden_with_time_axis = tf.expand_dims(hidden, 1)

    # Additive (Bahdanau-style) score.
    score = tf.nn.tanh(tf.layers.dense(outputs, units)
                       + tf.layers.dense(hidden_with_time_axis, units))

    attention_weights = tf.nn.softmax(tf.layers.dense(score, 1), axis=1)

    context_vector = attention_weights * outputs
    context_vector = tf.reduce_sum(context_vector, axis=1)

    return context_vector


contex_vector = attention(outputs, state)


def decoder(start, contex_vector, hidden, batch_sz):
    stacked_gru_ = rnn.MultiRNNCell(
        [gru_cell() for _ in range(number_of_layers)], state_is_tuple=False)

    state = stacked_gru_.zero_state(batch_sz, tf.float32)

    word_vector = tf.nn.embedding_lookup(embedding_weights, start)

    result = []

    for _ in range(max_length_target):
        # The ValueError below is raised on this call.
        output, state = stacked_gru_(word_vector[:, 0], state)
        result.append(output)
        break

    return result[0]


start = tf.placeholder('int32', [batch_sz, 1])
decoder(start, contex_vector, state, batch_sz)

ValueError: Variable multi_rnn_cell/cell_0/gru_cell/gates/kernel already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
The traceback points at the line output, state = stacked_gru_(word_vector[:, 0], state).


1 answer
ivodopyanov, 2019-02-07
@adelshin23

Each variable in TF has its own name, and by default TensorFlow will not let you create two variables with the same name. If you need to reuse an existing variable somewhere, you must specify reuse=True when creating it. If you need a copy of a variable (same shape and dtype, but separate weight values), then either give it a different name (for example, 'decoder_cell'), or, when it is a whole block of code that is reused rather than a single variable (as with your decoder function), put each use in its own namespace beforehand via with tf.variable_scope('.....'): (see the sketch below).
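
Applied to the code in the question, a minimal sketch of that last approach might look like this (the scope names 'encoder', 'attention' and 'decoder' are my own arbitrary choices, and the rest of your script is assumed unchanged):

# Each tf.variable_scope prefixes the variables created inside it, so the
# encoder's weights become encoder/multi_rnn_cell/cell_0/gru_cell/... and
# the decoder's become decoder/multi_rnn_cell/cell_0/gru_cell/..., and the
# names no longer collide.
with tf.variable_scope('encoder'):
    outputs, state = encoder(embed, batch_sz)

with tf.variable_scope('attention'):
    contex_vector = attention(outputs, state)

start = tf.placeholder('int32', [batch_sz, 1])

with tf.variable_scope('decoder'):
    decoder(start, contex_vector, state, batch_sz)

Separate scopes give the encoder and decoder their own weights, which is what a seq2seq model normally needs. If you actually wanted the two stacked GRUs to share one set of weights, wrapping both calls in the same tf.variable_scope('rnn', reuse=tf.AUTO_REUSE) would also remove the error, by reusing the existing variables instead of trying to create them a second time.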
