In the tutorial at https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html we have a single-layer seq2seq model. I would like to extend this model with one additional layer on the encoder side and one additional layer on the decoder side. Training seems to work, but I can't set up the decoder correctly at inference time with multiple layers. Here are the changes I made to the model described in the tutorial.
Encoder:
import numpy as np
from keras.models import Model
from keras.layers import Input, LSTM, Dense

encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder1 = LSTM(latent_dim, return_sequences=True)
encoder2 = LSTM(latent_dim, return_state=True)
x = encoder1(encoder_inputs)
encoder_outputs, state_h, state_c = encoder2(x)
Decoder:
encoder_states = [state_h, state_c]

# Set up the decoder, using `encoder_states` as the initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# The decoder returns full output sequences and its internal states.
# The states are not used in the training model, but they will be
# needed for inference.
decoder1 = LSTM(latent_dim, return_sequences=True)
decoder2 = LSTM(latent_dim, return_sequences=True, return_state=True)
dx = decoder1(decoder_inputs, initial_state=encoder_states)
decoder_outputs, _, _ = decoder2(dx)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`.
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
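Training itself is unchanged from the tutorial; for completeness, this is the setup I use (same optimizer and loss as the single-layer version):

model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)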
Inference (this is the part where I don't know how to build a decoder with multiple layers). The current implementation, which does not work, is given below:
encoder_model = Model(encoder_inputs, encoder_states)

decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]

out_decoder1 = LSTM(latent_dim, return_sequences=True, return_state=True)
out_decoder2 = LSTM(latent_dim, return_sequences=True, return_state=True)

odx = out_decoder1(decoder_inputs, initial_state=decoder_states_inputs)
decoder_outputs, state_h, state_c = out_decoder2(odx)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)

decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
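My guess, going by the single-layer tutorial, is that the inference decoder should instead reuse the trained decoder1/decoder2 layers (the fresh out_decoder1/out_decoder2 above never receive the trained weights) and carry one (h, c) state pair per layer. Here is an untested sketch of what I mean; it assumes decoder1 is also built with return_state=True for training, where the extra states can be ignored with dx, _, _ = decoder1(...):

# Untested sketch: reuse the *trained* decoder layers and keep one
# (h, c) state pair per decoder layer.
decoder_state_input_h1 = Input(shape=(latent_dim,))
decoder_state_input_c1 = Input(shape=(latent_dim,))
decoder_state_input_h2 = Input(shape=(latent_dim,))
decoder_state_input_c2 = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h1, decoder_state_input_c1,
                         decoder_state_input_h2, decoder_state_input_c2]

# First decoder layer: driven by the target tokens and its own previous states.
odx, state_h1, state_c1 = decoder1(
    decoder_inputs, initial_state=decoder_states_inputs[:2])
# Second decoder layer: driven by the first layer's output and its own states.
decoder_outputs2, state_h2, state_c2 = decoder2(
    odx, initial_state=decoder_states_inputs[2:])
decoder_states = [state_h1, state_c1, state_h2, state_c2]
decoder_outputs2 = decoder_dense(decoder_outputs2)

decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs2] + decoder_states)

Since encoder_model only yields one state pair (from encoder2), I assume the second layer's initial states have to be seeded separately, e.g. with zeros, matching training where decoder2 starts from a zero state.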
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)

    # Generate an empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of the target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.

    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)

        # Sample a token
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char

        # Exit condition: either hit max length
        # or find the stop character.
        if (sampled_char == '\n' or
                len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True

        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.

        # Update states
        states_value = [h, c]

    return decoded_sentence
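If the inference model did return two state pairs as in the sketch above, I assume the sampling loop would change along these lines (decode_sequence_multilayer is just a hypothetical name, also untested):

def decode_sequence_multilayer(input_seq):
    # Encoder states seed the first decoder layer; the second layer
    # starts from zeros, matching training, where decoder2 received
    # no initial_state.
    states_value = encoder_model.predict(input_seq) + \
        [np.zeros((1, latent_dim)), np.zeros((1, latent_dim))]
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        # One (h, c) pair per decoder layer comes back from the model.
        output_tokens, h1, c1, h2, c2 = decoder_model.predict(
            [target_seq] + states_value)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        if (sampled_char == '\n' or
                len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Carry all four states to the next step.
        states_value = [h1, c1, h2, c2]
    return decoded_sentence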
for seq_index in range(1):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
    print('Decoded sentence:', decoded_sentence)
Thanks