3 Techniques to improve the performance and generalization power of RNNs:
Unlike the previous cases, where we analyzed text sequences, we will now work with time series, another application of deep learning to sequences.
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
import os
data_dir = '/content/drive/My Drive/Deep_Learning/'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
# Opening the file
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print(len(lines))
['"Date Time"', '"p (mbar)"', '"T (degC)"', '"Tpot (K)"', '"Tdew (degC)"', '"rh (%)"', '"VPmax (mbar)"', '"VPact (mbar)"', '"VPdef (mbar)"', '"sh (g/kg)"', '"H2OC (mmol/mol)"', '"rho (g/m**3)"', '"wv (m/s)"', '"max. wv (m/s)"', '"wd (deg)"'] 420551
We need to convert the file into a NumPy array.
import numpy as np
# creating a matrix of zeros
float_data = np.zeros((len(lines), len(header) - 1))
# filling it with the data from the dataset
for i, line in enumerate(lines):
    values = [float(x) for x in line.split(',')[1:]]
    float_data[i, :] = values
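As a quick check (an addition to the original notebook), we can confirm that the array has one row per data line and one column per numeric field, since the "Date Time" column was dropped:
print(float_data.shape)  # expected: (420551, 14)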
Plotting the temperature over the whole time range (time series):
from matplotlib import pyplot as plt
temp = float_data[:, 1]
plt.plot(range(len(temp)), temp)
[<matplotlib.lines.Line2D at 0x7ff0068ebac8>]
Visualizing only the first 10 days:
plt.plot(range(1440), temp[:1440])
[<matplotlib.lines.Line2D at 0x7ff006441470>]
Settings (consistent with the constants used in the code below; see the quick calculation right after this list):
- lookback = 1440: observations will go back 10 days.
- step = 6: observations will be sampled at one data point per hour.
- delay = 144: targets will be 24 hours in the future.
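As a sanity check (not part of the original notebook), these constants follow directly from the dataset's 10-minute sampling interval:
# The Jena dataset records one observation every 10 minutes
points_per_hour = 60 // 10              # 6 timesteps per hour
points_per_day = 24 * points_per_hour   # 144 timesteps per day

lookback = 10 * points_per_day   # 1440 -> look back over the previous 10 days
step = points_per_hour           # 6    -> sample one data point per hour
delay = points_per_day           # 144  -> predict the temperature 24 hours ahead
print(lookback, step, delay)     # 1440 6 144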
We need to normalize the data, using only the first 200,000 timesteps (the training portion) to compute the mean and standard deviation:
# Preparing the data
# using only the first 200,000 timesteps for the mean and standard deviation
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
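A quick sanity check (not in the original notebook): after normalization, the slice used to compute the statistics should have mean close to 0 and standard deviation close to 1 in every column:
print(float_data[:200000].mean(axis=0).round(3))  # all values close to 0
print(float_data[:200000].std(axis=0).round(3))   # all values close to 1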
We need a generator function that takes the float_data array and yields batches of samples from the recent past together with a target temperature in the future:
def generator(data, lookback, delay, min_index, max_index,
              shuffle=False, batch_size=128, step=6):
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while 1:
        if shuffle:
            rows = np.random.randint(
                min_index + lookback, max_index, size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows),
                            lookback // step,
                            data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(rows[j] - lookback, rows[j], step)
            samples[j] = data[indices]
            targets[j] = data[rows[j] + delay][1]
        yield samples, targets
With the generator function we will draw the training, validation, and test samples. The validation and test splits are out of time, i.e., they come from periods after the training data.
# setting the constants
lookback = 1440
step = 6
delay = 144
batch_size = 128
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step,
batch_size=batch_size)
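Before training, it helps to inspect one batch from the generator (this check is an addition to the notebook). With lookback=1440 and step=6, each sample spans 240 hourly timesteps of the 14 features:
sample_batch, target_batch = next(train_gen)
print(sample_batch.shape)  # expected: (128, 240, 14)
print(target_batch.shape)  # expected: (128,)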
# Number of steps to draw from val_gen in order to cover the entire validation set
val_steps = (300000 - 200001 - lookback) // batch_size
# Number of steps to draw from test_gen in order to cover the entire test set
test_steps = (len(float_data) - 300001 - lookback) // batch_size
It is always important to have a baseline against which to compare results and to run a sanity check.
In the case of temperature forecasting, we can assume as a naive baseline that the temperature 24 hours from now will be equal to the temperature right now.
To evaluate this baseline, we use the mean absolute error (MAE).
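For a batch of predictions $\hat{y}_i$ and targets $y_i$, this metric is simply $\mathrm{MAE} = \frac{1}{N} \sum_{i=1}^{N} |\hat{y}_i - y_i|$.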
def evaluate_naive_method():
    batch_maes = []
    for step in range(val_steps):
        samples, targets = next(val_gen)
        preds = samples[:, -1, 1]
        mae = np.mean(np.abs(preds - targets))
        batch_maes.append(mae)
    print(np.mean(batch_maes))
evaluate_naive_method()
0.2897359729905486
Since we normalized the data, this translates to $0.29 \times \sigma_{Temperature} \approx 2.57~^{\circ}C$ of average absolute error.
We need a model whose MAE beats this baseline!
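That figure can be recovered directly from the normalization statistics (a small sketch added here; std was computed above and the temperature is column 1):
celsius_mae = 0.29 * std[1]  # convert the normalized MAE back to degrees Celsius
print(celsius_mae)           # roughly 2.57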
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
Epoch 1/20
500/500 [==============================] - 13s 26ms/step - loss: 1.6248 - val_loss: 1.7177 Epoch 2/20 500/500 [==============================] - 13s 25ms/step - loss: 2.5672 - val_loss: 5.1858 Epoch 3/20 500/500 [==============================] - 13s 25ms/step - loss: 5.5524 - val_loss: 8.4540 Epoch 4/20 500/500 [==============================] - 13s 26ms/step - loss: 9.3855 - val_loss: 10.1728 Epoch 5/20 500/500 [==============================] - 13s 25ms/step - loss: 14.1595 - val_loss: 16.8647 Epoch 6/20 500/500 [==============================] - 13s 25ms/step - loss: 18.9023 - val_loss: 24.4707 Epoch 7/20 500/500 [==============================] - 13s 26ms/step - loss: 23.9287 - val_loss: 30.2016 Epoch 8/20 500/500 [==============================] - 13s 25ms/step - loss: 28.8016 - val_loss: 29.5478 Epoch 9/20 500/500 [==============================] - 13s 26ms/step - loss: 33.7740 - val_loss: 55.5294 Epoch 10/20 500/500 [==============================] - 13s 26ms/step - loss: 38.7159 - val_loss: 33.9984 Epoch 11/20 500/500 [==============================] - 13s 25ms/step - loss: 43.7163 - val_loss: 20.2754 Epoch 12/20 500/500 [==============================] - 13s 25ms/step - loss: 48.4836 - val_loss: 57.2961 Epoch 13/20 500/500 [==============================] - 13s 25ms/step - loss: 53.3893 - val_loss: 45.2789 Epoch 14/20 500/500 [==============================] - 13s 26ms/step - loss: 58.5179 - val_loss: 19.6612 Epoch 15/20 500/500 [==============================] - 13s 25ms/step - loss: 63.3970 - val_loss: 66.5079 Epoch 16/20 500/500 [==============================] - 13s 25ms/step - loss: 68.5922 - val_loss: 92.6845 Epoch 17/20 500/500 [==============================] - 13s 25ms/step - loss: 73.1853 - val_loss: 83.0099 Epoch 18/20 500/500 [==============================] - 13s 26ms/step - loss: 78.0382 - val_loss: 101.5267 Epoch 19/20 500/500 [==============================] - 13s 26ms/step - loss: 82.7113 - val_loss: 132.5263 Epoch 20/20 500/500 [==============================] - 13s 25ms/step - loss: 88.3620 - val_loss: 68.7708
# Visualizing the results
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In this training run we flattened the whole data sequence, without considering how the measurement at each timestep influences the temperature at the next one.
Just as with the sequence of words in a review, order and causality matter!
We will use a GRU (gated recurrent unit), which is similar to an LSTM but computationally cheaper.
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where Epoch 1/20 500/500 [==============================] - 114s 229ms/step - loss: 0.3119 - val_loss: 0.2720 Epoch 2/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2854 - val_loss: 0.2676 Epoch 3/20 500/500 [==============================] - 113s 227ms/step - loss: 0.2783 - val_loss: 0.2615 Epoch 4/20 500/500 [==============================] - 113s 227ms/step - loss: 0.2733 - val_loss: 0.2721 Epoch 5/20 500/500 [==============================] - 112s 224ms/step - loss: 0.2662 - val_loss: 0.2637 Epoch 6/20 500/500 [==============================] - 112s 224ms/step - loss: 0.2587 - val_loss: 0.2695 Epoch 7/20 500/500 [==============================] - 112s 225ms/step - loss: 0.2538 - val_loss: 0.2691 Epoch 8/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2502 - val_loss: 0.2699 Epoch 9/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2440 - val_loss: 0.2730 Epoch 10/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2425 - val_loss: 0.2740 Epoch 11/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2365 - val_loss: 0.2757 Epoch 12/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2312 - val_loss: 0.2734 Epoch 13/20 500/500 [==============================] - 113s 225ms/step - loss: 0.2287 - val_loss: 0.2787 Epoch 14/20 500/500 [==============================] - 113s 227ms/step - loss: 0.2261 - val_loss: 0.2785 Epoch 15/20 500/500 [==============================] - 113s 226ms/step - loss: 0.2216 - val_loss: 0.2801 Epoch 16/20 500/500 [==============================] - 114s 229ms/step - loss: 0.2185 - val_loss: 0.2854 Epoch 17/20 500/500 [==============================] - 113s 227ms/step - loss: 0.2155 - val_loss: 0.2881 Epoch 18/20 500/500 [==============================] - 112s 225ms/step - loss: 0.2113 - val_loss: 0.2863 Epoch 19/20 500/500 [==============================] - 112s 224ms/step - loss: 0.2097 - val_loss: 0.2895 Epoch 20/20 500/500 [==============================] - 112s 224ms/step - loss: 0.2059 - val_loss: 0.2929
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
To mitigate overfitting, dropout is a good option, since it breaks chance correlations in the training data.
Applying it to recurrent networks is not trivial, because the recurrent computation depends on information carried over from previous timesteps.
Yarin Gal's recommendation: the same dropout mask should be used at every timestep, and a constant dropout mask should also be applied to the inner recurrent activations of the layer, a kind of "recurrent dropout".
In Keras, every recurrent layer has two dropout-related arguments: dropout, which sets the dropout rate for the layer's inputs, and recurrent_dropout, which sets the dropout rate for the recurrent state:
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,
dropout=0.2,
recurrent_dropout=0.2,
input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:148: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3733: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version. Instructions for updating: Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`. Epoch 1/40 500/500 [==============================] - 133s 266ms/step - loss: 0.3326 - val_loss: 0.2759 Epoch 2/40 500/500 [==============================] - 132s 265ms/step - loss: 0.3143 - val_loss: 0.2760 Epoch 3/40 500/500 [==============================] - 131s 262ms/step - loss: 0.3074 - val_loss: 0.2707 Epoch 4/40 500/500 [==============================] - 131s 262ms/step - loss: 0.3029 - val_loss: 0.2727 Epoch 5/40 500/500 [==============================] - 132s 265ms/step - loss: 0.3019 - val_loss: 0.2699 Epoch 6/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2993 - val_loss: 0.2680 Epoch 7/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2956 - val_loss: 0.2676 Epoch 8/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2924 - val_loss: 0.2670 Epoch 9/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2913 - val_loss: 0.2660 Epoch 10/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2906 - val_loss: 0.2653 Epoch 11/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2895 - val_loss: 0.2651 Epoch 12/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2894 - val_loss: 0.2656 Epoch 13/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2872 - val_loss: 0.2673 Epoch 14/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2856 - val_loss: 0.2655 Epoch 15/40 500/500 [==============================] - 132s 265ms/step - loss: 0.2831 - val_loss: 0.2630 Epoch 16/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2822 - val_loss: 0.2654 Epoch 17/40 500/500 [==============================] - 133s 265ms/step - loss: 0.2844 - val_loss: 0.2636 Epoch 18/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2825 - val_loss: 0.2625 Epoch 19/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2793 - val_loss: 0.2643 Epoch 20/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2800 - val_loss: 0.2625 Epoch 21/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2788 - val_loss: 0.2641 Epoch 22/40 500/500 [==============================] - 134s 268ms/step - loss: 0.2779 - val_loss: 0.2641 Epoch 23/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2755 - val_loss: 0.2645 Epoch 24/40 500/500 [==============================] - 132s 265ms/step - loss: 0.2754 - val_loss: 0.2643 Epoch 25/40 500/500 [==============================] - 130s 261ms/step - loss: 0.2756 - val_loss: 0.2620 Epoch 26/40 500/500 [==============================] - 130s 261ms/step - loss: 0.2747 - val_loss: 0.2671 Epoch 27/40 500/500 [==============================] - 130s 259ms/step - loss: 0.2738 - val_loss: 0.2609 Epoch 28/40 500/500 [==============================] - 130s 259ms/step - loss: 0.2737 - val_loss: 0.2655 Epoch 29/40 500/500 [==============================] - 132s 
264ms/step - loss: 0.2725 - val_loss: 0.2654 Epoch 30/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2734 - val_loss: 0.2623 Epoch 31/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2726 - val_loss: 0.2656 Epoch 32/40 500/500 [==============================] - 132s 264ms/step - loss: 0.2705 - val_loss: 0.2650 Epoch 33/40 500/500 [==============================] - 132s 265ms/step - loss: 0.2725 - val_loss: 0.2651 Epoch 34/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2701 - val_loss: 0.2627 Epoch 35/40 500/500 [==============================] - 134s 267ms/step - loss: 0.2709 - val_loss: 0.2731 Epoch 36/40 500/500 [==============================] - 133s 267ms/step - loss: 0.2699 - val_loss: 0.2614 Epoch 37/40 500/500 [==============================] - 134s 267ms/step - loss: 0.2698 - val_loss: 0.2640 Epoch 38/40 500/500 [==============================] - 133s 266ms/step - loss: 0.2693 - val_loss: 0.2627 Epoch 39/40 500/500 [==============================] - 132s 263ms/step - loss: 0.2685 - val_loss: 0.2627 Epoch 40/40 500/500 [==============================] - 133s 265ms/step - loss: 0.2661 - val_loss: 0.2632
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Since the model is no longer badly overfitting but validation performance seems to have plateaued, we can try increasing the capacity of the network by stacking recurrent layers. To stack GRU layers in Keras, every intermediate layer must return its full sequence of outputs (return_sequences=True) rather than only the last one:
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,
dropout=0.1,
recurrent_dropout=0.5,
return_sequences=True,
input_shape=(None, float_data.shape[-1])))
model.add(layers.GRU(64, activation='relu',
dropout=0.1,
recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
Epoch 1/40 500/500 [==============================] - 341s 681ms/step - loss: 0.3336 - val_loss: 0.2745 Epoch 2/40 500/500 [==============================] - 338s 675ms/step - loss: 0.3144 - val_loss: 0.2722 Epoch 3/40 500/500 [==============================] - 341s 683ms/step - loss: 0.3068 - val_loss: 0.2724 Epoch 4/40 500/500 [==============================] - 339s 678ms/step - loss: 0.3012 - val_loss: 0.2697 Epoch 5/40 500/500 [==============================] - 339s 678ms/step - loss: 0.2969 - val_loss: 0.2646 Epoch 6/40 500/500 [==============================] - 339s 678ms/step - loss: 0.2952 - val_loss: 0.2710 Epoch 7/40 500/500 [==============================] - 338s 676ms/step - loss: 0.2924 - val_loss: 0.2675 Epoch 8/40 500/500 [==============================] - 339s 677ms/step - loss: 0.2894 - val_loss: 0.2626 Epoch 9/40 500/500 [==============================] - 338s 675ms/step - loss: 0.2856 - val_loss: 0.2641 Epoch 10/40 500/500 [==============================] - 338s 677ms/step - loss: 0.2845 - val_loss: 0.2637 Epoch 11/40 161/500 [========>.....................] - ETA: 2:54 - loss: 0.2808
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Another thing we can try is changing the order in which the network sees the data. Below we define a reverse_order_generator that yields the same samples, but with the timesteps in antichronological order:
def reverse_order_generator(data, lookback, delay, min_index, max_index,
                            shuffle=False, batch_size=128, step=6):
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while 1:
        if shuffle:
            rows = np.random.randint(
                min_index + lookback, max_index, size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows),
                            lookback // step,
                            data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(rows[j] - lookback, rows[j], step)
            samples[j] = data[indices]
            targets[j] = data[rows[j] + delay][1]
        yield samples[:, ::-1, :], targets
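The only change with respect to the original generator is the final yield, which flips each sample along the time axis with samples[:, ::-1, :]. A tiny illustration of that slicing (added here for clarity):
toy = np.arange(6).reshape((1, 3, 2))  # 1 sample, 3 timesteps, 2 features
print(toy[0])              # [[0 1] [2 3] [4 5]]
print(toy[:, ::-1, :][0])  # timesteps reversed: [[4 5] [2 3] [0 1]]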
train_gen_reverse = reverse_order_generator(
float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen_reverse = reverse_order_generator(
float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
model = Sequential()
model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen_reverse,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen_reverse,
validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
On the temperature task, the chronological order is expected to work better, because the recent past is far more informative than the distant past for predicting tomorrow's temperature. Text is different: to check how much the direction of the word sequence matters, we train an LSTM on the IMDB reviews with every sequence reversed:
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras import layers
from keras.models import Sequential
# Number of words to consider as features
max_features = 10000
# Cut texts after this number of words (among top max_features most common words)
maxlen = 500
# Load data
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# Reverse sequences
x_train = [x[::-1] for x in x_train]
x_test = [x[::-1] for x in x_test]
# Pad sequences
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
model = Sequential()
model.add(layers.Embedding(max_features, 128))
model.add(layers.LSTM(32))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
A bidirectional RNN builds on this idea: it processes the sequence both chronologically and antichronologically with two separate recurrent layers and merges their representations, often catching patterns that a single direction would miss. In Keras this is done by wrapping a recurrent layer with layers.Bidirectional. Let's train a bidirectional LSTM on IMDB:
from keras import backend as K
K.clear_session()
model = Sequential()
model.add(layers.Embedding(max_features, 32))
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)
Finally, we apply the same idea to the temperature-forecasting task, wrapping a GRU in a Bidirectional layer:
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Bidirectional(
layers.GRU(32), input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)