Python_Code: Added python code after running pylint
diff --git a/models/failure_prediction/python/stacked_lstm_correlation.py b/models/failure_prediction/python/stacked_lstm_correlation.py
new file mode 100644 (file)
index 0000000..e5ffd78
--- /dev/null
@@ -0,0 +1,405 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, W0404, E0602, C0326, C0330, W0106, C0412
+# -*- coding: utf-8 -*-
+"""stacked_LSTM_Correlation.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1x8vGD105bcSgNTyC2sx0C3ixUsVPvDQ4
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright 2021 Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from keras.layers import Activation, Dense, Dropout
+import seaborn as sns
+import os
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/gdrive')
+
+# Load the preprocessed Ellis dataset from Google Drive
+
+df_Ellis = pd.read_csv(
+    "/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+df_Ellis.plot()
+
+# Plot a histogram of each feature
+df_Ellis.hist(bins=100, figsize=(20, 15))
+# save_fig("attribute_histogram_plots")
+plt.show()
+
+cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']]
+cpu_system_perc.rolling(12).mean().plot(
+    figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']]
+load_avg_1_min.rolling(12).mean().plot(
+    figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']]
+cpu_wait_perc.rolling(12).mean().plot(
+    figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(
+    12).mean(), cpu_wait_perc.rolling(12).mean()], axis=1)
+df_dg.plot(figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=20)
+
+
+# Compute and plot the correlation matrix
+color = sns.color_palette()
+sns.set_style('darkgrid')
+
+correlation_matrix = df_Ellis.corr()
+f, ax = plt.subplots(figsize=(20, 10))
+sns.heatmap(
+    correlation_matrix,
+    cbar=True,
+    vmin=0,
+    vmax=1,
+    square=True,
+    annot=True)
+plt.show()
+
+df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])
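+# The line above ranks every column by correlation with the 1-min load
+# average; the multivariate feature set below is presumably informed by it.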
+
+# Select the multivariate feature set
+
+features_3 = [
+    'ellis-cpu.wait_perc',
+    'ellis-load.avg_1_min',
+    'ellis-net.in_bytes_sec',
+    'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+features.plot(subplots=True)
+
+features = features.values
+
+# Train/validation split point and a fixed seed for reproducibility
+train_split = 141600
+tf.random.set_seed(13)
+
+# Standardize each feature column using training-set statistics only
+features_mean = features[:train_split].mean(axis=0)
+features_std = features[:train_split].std(axis=0)
+features = (features - features_mean) / features_std
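+
+# Illustrative note: to report predictions in original units later, invert
+# the scaling, e.g. y_orig = y_scaled * features_std[1] + features_mean[1]
+# (column 1 of `features` is the ellis-load.avg_1_min target used below).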
+
+print(type(features))
+print(features.shape)
+
+# Create windowed multivariate samples
+
+
+def multivariate_data(
+        features,
+        target,
+        start_idx,
+        end_idx,
+        history_size,
+        target_size,
+        step,
+        single_step=False):
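+    # Slide a window of `history_size` observations (sampled every `step`)
+    # over the series; the label is the target value `target_size` steps
+    # ahead (single_step=True) or the next `target_size` values otherwise.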
+    data = []
+    labels = []
+    start_idx = start_idx + history_size
+    if end_idx is None:
+        end_idx = len(features) - target_size
+    for i in range(start_idx, end_idx):
+        idxs = range(i - history_size, i, step)  # using step
+        data.append(features[idxs])
+        if single_step:
+            labels.append(target[i + target_size])
+        else:
+            labels.append(target[i:i + target_size])
+
+    return np.array(data), np.array(labels)
+
+# generate multivariate data
+
+
+history = 720
+future_target = 72
+STEP = 6
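+# With history=720 and STEP=6, each sample spans 720 past observations
+# sampled every 6th one, i.e. 720 / 6 = 120 time steps; the single-step
+# label is the scaled 1-min load average 72 observations ahead.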
+
+x_train_ss, y_train_ss = multivariate_data(
+    features, features[:, 1], 0, train_split, history, future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = multivariate_data(features, features[:, 1], train_split, None, history,
+                                       future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
+
+# Build TensorFlow datasets for single-step training
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
+
+
+def root_mean_squared_error(y_true, y_pred):
+    # Custom RMSE in TensorFlow ops (unused below; compile() uses the
+    # built-in tf.keras.metrics.RootMeanSquaredError instead)
+    return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
+
+
+"""## Why Increase Depth?
+Stacking LSTM hidden layers makes the model deeper, more accurately earning the description as a deep learning technique. It is the depth of neural networks that is generally attributed to the success of the approach on a wide range of challenging prediction problems.
+
+As Stacked LSTMs are now a stable technique for challenging sequence prediction problems. A Stacked LSTM architecture is defined as an LSTM model comprised of multiple LSTM layers. An LSTM layer above provides a sequence output rather than a single value output to the LSTM layer below. Specifically, one output per input time step, rather than one output time step for all input time steps.
+
+We created Stacked LSTM model using Keras which is a Python deep learning library.
+"""
+
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(tf.keras.layers.LSTM(
+    32, return_sequences=True, input_shape=x_train_ss.shape[-2:]))
+single_step_model.add(tf.keras.layers.Dropout(0.3))
+single_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+single_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+single_step_model.add(tf.keras.layers.Activation("relu"))
+single_step_model.add(tf.keras.layers.Dense(1))
+single_step_model.compile(
+    optimizer=tf.keras.optimizers.Adam(),
+    loss='mae',
+    metrics=[
+        tf.keras.metrics.RootMeanSquaredError(
+            name='rmse')])
+#single_step_model.compile(loss='mse', optimizer='rmsprop')
+single_step_model_history = single_step_model.fit(
+    train_ss,
+    epochs=EPOCHS,
+    steps_per_epoch=steps,
+    validation_data=val_ss,
+    validation_steps=50)
+
+
+single_step_model.summary()
+
+# Plot train/validation loss
+
+
+def plot_loss(history, title):
+    loss = history.history['loss']
+    val_loss = history.history['val_loss']
+
+    epochs = range(len(loss))
+    plt.figure()
+    plt.plot(epochs, loss, 'b', label='Train Loss')
+    plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+    plt.title(title)
+    plt.legend()
+    plt.grid()
+    plt.show()
+
+
+plot_loss(single_step_model_history,
+          'Single Step Training and validation loss')
+
+# Plot train/validation RMSE
+
+
+def plot_rmse(history, title):
+    loss = history.history['rmse']
+    val_loss = history.history['val_rmse']
+
+    epochs = range(len(loss))
+    plt.figure()
+    plt.plot(epochs, loss, 'b', label='Train RMSE')
+    plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+    plt.title(title)
+    plt.legend()
+    plt.grid()
+    plt.show()
+
+
+plot_rmse(single_step_model_history,
+          'Single Step Training and validation RMSE')
+
+# Function to create time steps
+
+
+def create_time_steps(length):
+    return list(range(-length, 0))
+
+# function to plot time series data
+
+
+def plot_time_series(plot_data, delta, title):
+    labels = ["History", 'True Future', 'Model Predcited']
+    marker = ['.-', 'rx', 'go']
+    time_steps = create_time_steps(plot_data[0].shape[0])
+
+    if delta:
+        future = delta
+    else:
+        future = 0
+    plt.title(title)
+    for i, x in enumerate(plot_data):
+        if i:
+            plt.plot(
+                future,
+                plot_data[i],
+                marker[i],
+                markersize=10,
+                label=labels[i])
+        else:
+            plt.plot(
+                time_steps,
+                plot_data[i].flatten(),
+                marker[i],
+                label=labels[i])
+    plt.legend()
+    plt.xlim([time_steps[0], (future + 5) * 2])
+
+    plt.xlabel('Time_Step')
+    return plt
+
+# Moving window average
+
+
+def MWA(history):
+    return np.mean(history)
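+
+# Unused in this script; hypothetical usage as a naive moving-average
+# baseline: MWA(x[0][:, 1].numpy()) returns the mean of a history window
+# for comparison against the model prediction.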
+
+# plot time series and predicted values
+
+
+for x, y in val_ss.take(5):
+    plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+                             single_step_model.predict(x)[0]], 12,
+                            'Single Step Prediction')
+    plot.show()
+
+"""# **MultiStep Forcasting**"""
+
+future_target = 72  # 72 future values
+x_train_multi, y_train_multi = multivariate_data(features, features[:, 1], 0,
+                                                 train_split, history,
+                                                 future_target, STEP)
+x_val_multi, y_val_multi = multivariate_data(features,
+                                             train_split, None, history,
+                                             future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+# TensorFlow datasets for multi-step training
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+    (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+    buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+# plotting function
+
+
+def multi_step_plot(history, true_future, prediction):
+    plt.figure(figsize=(12, 6))
+    num_in = create_time_steps(len(history))
+    num_out = len(true_future)
+    plt.grid()
+    plt.plot(num_in, np.array(history[:, 1]), label='History')
+    plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+             label='True Future')
+    if prediction.any():
+        plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+                 label='Predicted Future')
+    plt.legend(loc='upper left')
+    plt.show()
+
+
+for x, y in train_data_multi.take(1):
+    multi_step_plot(x[0], y[0], np.array([0]))
+
+multi_step_model = tf.keras.models.Sequential()
+multi_step_model.add(tf.keras.layers.LSTM(
+    32, return_sequences=True, input_shape=x_train_multi.shape[-2:]))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+multi_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+multi_step_model.add(tf.keras.layers.Activation("relu"))
+multi_step_model.add(tf.keras.layers.Dense(72))  # for 72 outputs
+
+multi_step_model.compile(
+    optimizer=tf.keras.optimizers.RMSprop(
+        clipvalue=1.0), loss='mae', metrics=[
+            tf.keras.metrics.RootMeanSquaredError(
+                name='rmse')])
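+
+# clipvalue=1.0 clips each gradient element to [-1, 1] before the update,
+# a common guard against exploding gradients when training LSTMs.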
+
+multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+                                          steps_per_epoch=steps,
+                                          validation_data=val_data_multi,
+                                          validation_steps=50)
+
+plot_loss(multi_step_history, 'Multi-Step Training and validation loss')
+plot_rmse(multi_step_history, 'Multi-Step Training and validation RMSE')
+
+for x, y in val_data_multi.take(5):
+    multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+    x_train_multi,
+    y_train_multi,
+    verbose=1,
+    batch_size=200)
+print('Train MAE: {}, RMSE: {}'.format(scores[0], scores[1]))
+
+scores_test = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('Test MAE: {}, RMSE: {}'.format(scores_test[0], scores_test[1]))
+
+y_pred_test = multi_step_model.predict(x_val_multi, verbose=0)
+
+# Compare the first predicted step of each validation window with the
+# actual value
+plt.figure(figsize=(10, 5))
+plt.plot(y_pred_test[:, 0], label='Predicted')
+plt.plot(y_val_multi[:, 0], label='Actual')
+plt.ylabel("Standardized ellis-load.avg_1_min")
+plt.xlabel("Validation sample")
+plt.legend(loc='upper left')
+plt.show()