[MODELS] Cleanup Jupyter Notebooks.
[thoth.git] / models / failure_prediction / jnotebooks / stacked_LSTM_Correlation.ipynb
1 {
2  "cells": [
3   {
4    "cell_type": "markdown",
5    "metadata": {
6     "id": "_EvHebagXgW4"
7    },
8    "source": [
9     "Contributors: **Rohit Singh Rathaur, Girish L.** \n",
10     "\n",
11     "Copyright 2021 [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]\n",
12     "\n",
13     "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
14     "you may not use this file except in compliance with the License.\n",
15     "You may obtain a copy of the License at\n",
16     "\n",
17     "    http://www.apache.org/licenses/LICENSE-2.0\n",
18     "\n",
19     "Unless required by applicable law or agreed to in writing, software\n",
20     "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
21     "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
22     "See the License for the specific language governing permissions and\n",
23     "limitations under the License."
24    ]
25   },
26   {
27    "cell_type": "code",
28    "execution_count": null,
29    "metadata": {
30     "colab": {
31      "base_uri": "https://localhost:8080/"
32     },
33     "id": "YQ6lT1e2hrx4",
34     "outputId": "d11aafe6-20e9-4719-a751-0da58234f8c2"
35    },
36    "outputs": [],
37    "source": [
38     "from google.colab import drive\n",
39     "drive.mount('/gdrive')"
40    ]
41   },
42   {
43    "cell_type": "code",
44    "execution_count": null,
45    "metadata": {
46     "id": "tLhroy5BnMnC"
47    },
48    "outputs": [],
49    "source": [
50     "# Importing libraries\n",
51     "import tensorflow as tf\n",
52     "import matplotlib.pyplot as plt\n",
53     "import matplotlib as mpl\n",
54     "import pandas as pd\n",
55     "import numpy as np\n",
56     "import os"
57    ]
58   },
59   {
60    "cell_type": "code",
61    "execution_count": null,
62    "metadata": {
63     "colab": {
64      "base_uri": "https://localhost:8080/",
65      "height": 419
66     },
67     "id": "2-UpMVsSnfCI",
68     "outputId": "8c28f578-4405-4a27-81e4-c0fd00475b96"
69    },
70    "outputs": [],
71    "source": [
72     "df_Ellis  = pd.read_csv(\"/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv\")\n",
73     "df_Ellis"
74    ]
75   },
76   {
77    "cell_type": "code",
78    "execution_count": null,
79    "metadata": {
80     "colab": {
81      "base_uri": "https://localhost:8080/",
82      "height": 293
83     },
84     "id": "92xBt43BnjAo",
85     "outputId": "179123d5-8aec-4dda-a400-9096dec19dd8"
86    },
87    "outputs": [],
88    "source": [
89     "df_Ellis.plot()"
90    ]
91   },
92   {
93    "cell_type": "code",
94    "execution_count": null,
95    "metadata": {
96     "colab": {
97      "base_uri": "https://localhost:8080/",
98      "height": 879
99     },
100     "id": "RSo-aa-SIoBR",
101     "outputId": "75071f5e-5942-4939-a33b-2dc8780e260c"
102    },
103    "outputs": [],
104    "source": [
105     "# we show here the hist\n",
106     "df_Ellis.hist(bins=100,figsize=(20,15))\n",
107     "#save_fig(\"attribute_histogram_plots\")\n",
108     "plt.show()"
109    ]
110   },
111   {
112    "cell_type": "code",
113    "execution_count": null,
114    "metadata": {
115     "colab": {
116      "base_uri": "https://localhost:8080/",
117      "height": 634
118     },
119     "id": "gggaMJ_2LtFs",
120     "outputId": "af5ce749-d888-4cae-ddcd-9a5a5ce6e558"
121    },
122    "outputs": [],
123    "source": [
124     "cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']] \n",
125     "cpu_system_perc.rolling(12).mean().plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
126     "plt.xlabel('Timestamp', fontsize=30);"
127    ]
128   },
129   {
130    "cell_type": "code",
131    "execution_count": null,
132    "metadata": {
133     "colab": {
134      "base_uri": "https://localhost:8080/",
135      "height": 634
136     },
137     "id": "R_ctvXcQL1Xf",
138     "outputId": "8beab52c-ffb7-415c-c84c-04ed39cd70cf"
139    },
140    "outputs": [],
141    "source": [
142     "load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']] \n",
143     "load_avg_1_min.rolling(12).mean().plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
144     "plt.xlabel('Timestamp', fontsize=30);"
145    ]
146   },
147   {
148    "cell_type": "code",
149    "execution_count": null,
150    "metadata": {
151     "colab": {
152      "base_uri": "https://localhost:8080/",
153      "height": 634
154     },
155     "id": "Gkd5ecCmL6Bw",
156     "outputId": "a6c042dc-999a-456c-b233-a1e05612e15f"
157    },
158    "outputs": [],
159    "source": [
160     "cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']] \n",
161     "cpu_wait_perc.rolling(12).mean().plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
162     "plt.xlabel('Timestamp', fontsize=30);"
163    ]
164   },
165   {
166    "cell_type": "code",
167    "execution_count": null,
168    "metadata": {
169     "colab": {
170      "base_uri": "https://localhost:8080/",
171      "height": 624
172     },
173     "id": "EycZrQU0MBSX",
174     "outputId": "6f325187-ee1b-4122-97f7-80ffdbf149d2"
175    },
176    "outputs": [],
177    "source": [
178     "df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(12).mean(),cpu_wait_perc.rolling(12).mean()], axis=1) \n",
179     "df_dg.plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
180     "plt.xlabel('Timestamp', fontsize=20); "
181    ]
182   },
183   {
184    "cell_type": "code",
185    "execution_count": null,
186    "metadata": {
187     "id": "YoQA_MIBMknS"
188    },
189    "outputs": [],
190    "source": []
191   },
192   {
193    "cell_type": "code",
194    "execution_count": null,
195    "metadata": {
196     "colab": {
197      "base_uri": "https://localhost:8080/",
198      "height": 710
199     },
200     "id": "Pi8UMMitMa3Q",
201     "outputId": "a44c4d8b-e9d2-4978-fb9e-ae179758e6bc"
202    },
203    "outputs": [],
204    "source": [
205     "# Compute and plot the correlation matrix as a heatmap\n",
206     "import seaborn as sns\n",
207     "color = sns.color_palette()\n",
208     "sns.set_style('darkgrid')\n",
209     "\n",
210     "correlationMatrix = df_Ellis.corr()\n",
211     "f, ax = plt.subplots(figsize=(20, 10))\n",
212     "sns.heatmap(correlationMatrix, cbar=True, vmin=0, vmax=1, square=True, annot=True);\n",
213     "plt.show()"
214    ]
215   },
216   {
217    "cell_type": "code",
218    "execution_count": null,
219    "metadata": {
220     "colab": {
221      "base_uri": "https://localhost:8080/"
222     },
223     "id": "rkYwyKtXMvpy",
224     "outputId": "c3e3d463-1089-48f7-da23-01a7b92423a5"
225    },
226    "outputs": [],
227    "source": [
228     "df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])"
229    ]
230   },
231   {
232    "cell_type": "code",
233    "execution_count": null,
234    "metadata": {
235     "colab": {
236      "base_uri": "https://localhost:8080/",
237      "height": 235
238     },
239     "id": "5oQK-ddinvCM",
240     "outputId": "7e9b0a07-d2de-4e42-d45f-572a8abeae9e"
241    },
242    "outputs": [],
243    "source": [
244     "## Using multivariate features\n",
245     "\n",
246     "features_3 = ['ellis-cpu.wait_perc', 'ellis-load.avg_1_min', 'ellis-net.in_bytes_sec', 'Label']\n",
247     "\n",
248     "features = df_Ellis[features_3]\n",
249     "features.index = df_Ellis['Timestamp']\n",
250     "features.head()"
251    ]
252   },
253   {
254    "cell_type": "code",
255    "execution_count": null,
256    "metadata": {
257     "colab": {
258      "base_uri": "https://localhost:8080/",
259      "height": 386
260     },
261     "id": "qbqn755fo81g",
262     "outputId": "f8c5d0a8-c26a-4aa5-a530-957480984106"
263    },
264    "outputs": [],
265    "source": [
266     "features.plot(subplots=True)"
267    ]
268   },
269   {
270    "cell_type": "code",
271    "execution_count": null,
272    "metadata": {
273     "id": "jJQD1x9psWCH"
274    },
275    "outputs": [],
276    "source": [
277     "features = features.values"
278    ]
279   },
280   {
281    "cell_type": "code",
282    "execution_count": null,
283    "metadata": {
284     "colab": {
285      "base_uri": "https://localhost:8080/"
286     },
287     "id": "xf8WCiykpUzN",
288     "outputId": "4dca8628-c567-41b0-f262-bc6a3dddb5a7"
289    },
290    "outputs": [],
291    "source": [
292     "### standardize data\n",
293     "train_split = 141600\n",
294     "tf.random.set_seed(13)\n",
295     "\n",
296     "### standardize data\n",
297     "features_mean = features[:train_split].mean()\n",
298     "features_std = features[:train_split].std()\n",
299     "features  = (features - features_mean)/ features_std\n",
300     "\n",
301     "print(type(features))\n",
302     "print(features.shape)\n"
303    ]
304   },
305   {
306    "cell_type": "code",
307    "execution_count": null,
308    "metadata": {
309     "id": "1a0hNDmppnLB"
310    },
311    "outputs": [],
312    "source": [
313     "### create multivariate data\n",
314     "\n",
315     "def mutlivariate_data(features , target , start_idx , end_idx , history_size , target_size,\n",
316     "                      step ,  single_step = False):\n",
317     "  data = []\n",
318     "  labels = []\n",
319     "  start_idx = start_idx + history_size\n",
320     "  if end_idx is None:\n",
321     "    end_idx = len(features)- target_size\n",
322     "  for i in range(start_idx , end_idx ):\n",
323     "    idxs = range(i-history_size, i, step) ### using step\n",
324     "    data.append(features[idxs])\n",
325     "    if single_step:\n",
326     "      labels.append(target[i+target_size])\n",
327     "    else:\n",
328     "      labels.append(target[i:i+target_size])\n",
329     "\n",
330     "  return np.array(data) , np.array(labels)"
331    ]
332   },
333   {
334    "cell_type": "code",
335    "execution_count": null,
336    "metadata": {
337     "colab": {
338      "base_uri": "https://localhost:8080/"
339     },
340     "id": "Z0CivgkitfgE",
341     "outputId": "7747f2fc-9d3d-4e46-9148-39708b9ae947"
342    },
343    "outputs": [],
344    "source": [
345     "### generate multivariate data\n",
346     "\n",
347     "history = 720\n",
348     "future_target = 72\n",
349     "STEP = 6\n",
350     "\n",
351     "x_train_ss , y_train_ss = mutlivariate_data(features , features[:, 1], 0, train_split, history,\n",
352     "                                            future_target, STEP , single_step = True)\n",
353     "\n",
354     "x_val_ss , y_val_ss = mutlivariate_data(features , features[:,1] , train_split , None , history ,\n",
355     "                                        future_target, STEP, single_step = True)\n",
356     "\n",
357     "print(x_train_ss.shape , y_train_ss.shape)\n",
358     "print(x_val_ss.shape , y_val_ss.shape)"
359    ]
360   },
361   {
362    "cell_type": "code",
363    "execution_count": null,
364    "metadata": {
365     "colab": {
366      "base_uri": "https://localhost:8080/"
367     },
368     "id": "VBdr2epGu3aq",
369     "outputId": "f3dccf5d-76c2-4c9a-898b-9a221f5a491c"
370    },
371    "outputs": [],
372    "source": [
373     "## tensorflow dataset\n",
374     "batch_size = 256\n",
375     "buffer_size = 10000\n",
376     "\n",
377     "train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))\n",
378     "train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()\n",
379     "\n",
380     "val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))\n",
381     "val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()\n",
382     "\n",
383     "print(train_ss)\n",
384     "print(val_ss)"
385    ]
386   },
387   {
388    "cell_type": "code",
389    "execution_count": null,
390    "metadata": {
391     "id": "9eQpwUyGglu_"
392    },
393    "outputs": [],
394    "source": [
395     "from tensorflow.keras import backend as K\n",
396     "\n",
397     "def root_mean_squared_error(y_true, y_pred):\n",
398     "    return K.sqrt(K.mean(K.square(y_pred - y_true)))"
397    ]
398   },
399   {
400    "cell_type": "markdown",
401    "metadata": {
402     "id": "XkGTfQ46NAlo"
403    },
404    "source": [
405     "## Why Increase Depth?\n",
406     "Stacking LSTM hidden layers makes the model deeper, more accurately earning the description as a deep learning technique. It is the depth of neural networks that is generally attributed to the success of the approach on a wide range of challenging prediction problems.\n",
407     "\n",
408     "As Stacked LSTMs are now a stable technique for challenging sequence prediction problems. A Stacked LSTM architecture is defined as an LSTM model comprised of multiple LSTM layers. An LSTM layer above provides a sequence output rather than a single value output to the LSTM layer below. Specifically, one output per input time step, rather than one output time step for all input time steps.\n",
409     "\n",
410     "We created Stacked LSTM model using Keras which is a Python deep learning library. "
411    ]
412   },
413   {
414    "cell_type": "code",
415    "execution_count": null,
416    "metadata": {
417     "colab": {
418      "base_uri": "https://localhost:8080/"
419     },
420     "id": "1cKtTAzqyiyL",
421     "outputId": "eed7670c-b3b2-4a20-82bb-7ff2b5f4a0eb"
422    },
423    "outputs": [],
424    "source": [
425     "from keras.layers import Activation, Dense, Dropout\n",
426     "### Modelling using LSTM\n",
427     "steps = 50\n",
428     "\n",
429     "EPOCHS =20\n",
430     "\n",
431     "single_step_model = tf.keras.models.Sequential()\n",
432     "\n",
433     "single_step_model.add(tf.keras.layers.LSTM(32, return_sequences=True, input_shape = x_train_ss.shape[-2:]))\n",
434     "single_step_model.add(tf.keras.layers.Dropout(0.3))\n",
435     "single_step_model.add(tf.keras.layers.LSTM(units=100,return_sequences=False))\n",
436     "single_step_model.add(tf.keras.layers.Dropout(0.2))\n",
437     "#model.add(Dense(units=1, activation='relu'))\n",
438     "single_step_model.add(tf.keras.layers.Activation(\"relu\"))\n",
439     "single_step_model.add(tf.keras.layers.Dense(1))\n",
440     "single_step_model.compile(optimizer = tf.keras.optimizers.Adam(), loss = 'mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])\n",
441     "#single_step_model.compile(loss='mse', optimizer='rmsprop')\n",
442     "single_step_model_history = single_step_model.fit(train_ss, epochs = EPOCHS , \n",
443     "                                                  steps_per_epoch =steps, validation_data = val_ss,\n",
444     "                                                  validation_steps = 50)\n",
445     "\n",
446     "\n",
447     "\n",
448     "single_step_model.summary()\n",
449     "\n"
450    ]
451   },
452   {
453    "cell_type": "code",
454    "execution_count": null,
455    "metadata": {
456     "colab": {
457      "base_uri": "https://localhost:8080/",
458      "height": 281
459     },
460     "id": "Pgev0dgzzBVx",
461     "outputId": "25c66158-969a-47c4-d35b-9c34abba211e"
462    },
463    "outputs": [],
464    "source": [
465     "## plot train test loss \n",
466     "\n",
467     "def plot_loss(history , title):\n",
468     "  loss = history.history['loss']\n",
469     "  val_loss = history.history['val_loss']\n",
470     "\n",
471     "  epochs = range(len(loss))\n",
472     "  plt.figure()\n",
473     "  plt.plot(epochs, loss , 'b' , label = 'Train Loss')\n",
474     "  plt.plot(epochs, val_loss , 'r' , label = 'Validation Loss')\n",
475     "  plt.title(title)\n",
476     "  plt.legend()\n",
477     "  plt.grid()\n",
478     "  plt.show()\n",
479     "\n",
480     "plot_loss(single_step_model_history , 'Single Step Training and validation loss')"
481    ]
482   },
483   {
484    "cell_type": "code",
485    "execution_count": null,
486    "metadata": {
487     "colab": {
488      "base_uri": "https://localhost:8080/",
489      "height": 281
490     },
491     "id": "EnYf6j4okEoC",
492     "outputId": "0a474737-67b6-4ff1-d6b2-37b7623a44ce"
493    },
494    "outputs": [],
495    "source": [
496     "## plot train test loss \n",
497     "\n",
498     "def plot_loss(history , title):\n",
499     "  loss = history.history['rmse']\n",
500     "  val_loss = history.history['val_rmse']\n",
501     "\n",
502     "  epochs = range(len(loss))\n",
503     "  plt.figure()\n",
504     "  plt.plot(epochs, loss , 'b' , label = 'Train RMSE')\n",
505     "  plt.plot(epochs, val_loss , 'r' , label = 'Validation RMSE')\n",
506     "  plt.title(title)\n",
507     "  plt.legend()\n",
508     "  plt.grid()\n",
509     "  plt.show()\n",
510     "\n",
511     "plot_loss(single_step_model_history , 'Single Step Training and validation loss')"
512    ]
513   },
514   {
515    "cell_type": "code",
516    "execution_count": null,
517    "metadata": {
518     "id": "WMegV8mNAwe_"
519    },
520    "outputs": [],
521    "source": [
522     "### function to create time steps\n",
523     "def create_time_steps(length):\n",
524     "  return list(range(-length,0))\n",
525     "\n",
526     "### function to plot time series data\n",
527     "\n",
528     "def plot_time_series(plot_data, delta , title):\n",
529     "  labels = [\"History\" , 'True Future' , 'Model Predicted']\n",
530     "  marker = ['.-' , 'rx' , 'go']\n",
531     "  time_steps = create_time_steps(plot_data[0].shape[0])\n",
532     "\n",
533     "  if delta:\n",
534     "    future = delta\n",
535     "  else:\n",
536     "    future = 0\n",
537     "  plt.title(title)\n",
538     "  for i , x in enumerate(plot_data):\n",
539     "    if i :\n",
540     "      plt.plot(future , plot_data[i] , marker[i], markersize = 10 , label = labels[i])\n",
541     "    else:\n",
542     "      plt.plot(time_steps, plot_data[i].flatten(), marker[i], label = labels[i])\n",
543     "  plt.legend()\n",
544     "  plt.xlim([time_steps[0], (future+5) *2])\n",
545     "\n",
546     "  plt.xlabel('Time_Step')\n",
547     "  return plt"
548    ]
549   },
550   {
551    "cell_type": "code",
552    "execution_count": null,
553    "metadata": {
554     "id": "q99i2c-9XKF3"
555    },
556    "outputs": [],
557    "source": [
558     "### Moving window average\n",
559     "\n",
560     "def MWA(history):\n",
561     "  return np.mean(history)"
562    ]
563   },
564   {
565    "cell_type": "code",
566    "execution_count": null,
567    "metadata": {
568     "colab": {
569      "base_uri": "https://localhost:8080/",
570      "height": 1000
571     },
572     "id": "xFJT1rZDAUVL",
573     "outputId": "8e38f8da-d7ac-40ce-eec9-eee2320fd828"
574    },
575    "outputs": [],
576    "source": [
577     "# plot time series and predicted values\n",
578     "\n",
579     "for x, y in val_ss.take(5):\n",
580     "  plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),\n",
581     "                    single_step_model.predict(x)[0]], 12,\n",
582     "                   'Single Step Prediction')\n",
583     "  plot.show()"
584    ]
585   },
586   {
587    "cell_type": "markdown",
588    "metadata": {
589     "id": "_KXWQVmyCSix"
590    },
591    "source": [
592     "# **MultiStep Forecasting**"
593    ]
594   },
595   {
596    "cell_type": "code",
597    "execution_count": null,
598    "metadata": {
599     "colab": {
600      "base_uri": "https://localhost:8080/"
601     },
602     "id": "Lu7m2Rr4AbMK",
603     "outputId": "7b932e10-bb78-4eac-8757-4aa2d915b96d"
604    },
605    "outputs": [],
606    "source": [
607     "future_target = 72 # 72 future values\n",
608     "x_train_multi, y_train_multi = mutlivariate_data(features, features[:, 1], 0,\n",
609     "                                                 train_split, history,\n",
610     "                                                 future_target, STEP)\n",
611     "x_val_multi, y_val_multi = mutlivariate_data(features, features[:, 1],\n",
612     "                                             train_split, None, history,\n",
613     "                                             future_target, STEP)\n",
614     "\n",
615     "print(x_train_multi.shape)\n",
616     "print(y_train_multi.shape)"
617    ]
618   },
619   {
620    "cell_type": "code",
621    "execution_count": null,
622    "metadata": {
623     "id": "GLRv5D16CrHj"
624    },
625    "outputs": [],
626    "source": [
627     "#  TF DATASET\n",
628     "\n",
629     "train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi))\n",
630     "train_data_multi = train_data_multi.cache().shuffle(buffer_size).batch(batch_size).repeat()\n",
631     "\n",
632     "val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))\n",
633     "val_data_multi = val_data_multi.batch(batch_size).repeat()"
634    ]
635   },
636   {
637    "cell_type": "code",
638    "execution_count": null,
639    "metadata": {
640     "colab": {
641      "base_uri": "https://localhost:8080/"
642     },
643     "id": "fjXexah9C8yg",
644     "outputId": "72c81185-3db8-4d45-f1f2-9bf5861c3a3e"
645    },
646    "outputs": [],
647    "source": [
648     "print(train_data_multi)\n",
649     "print(val_data_multi)"
650    ]
651   },
652   {
653    "cell_type": "code",
654    "execution_count": null,
655    "metadata": {
656     "colab": {
657      "base_uri": "https://localhost:8080/",
658      "height": 385
659     },
660     "id": "7mtLZ6S-DPU-",
661     "outputId": "a4a392fe-73a5-44f7-ae77-003e7c46a1ad"
662    },
663    "outputs": [],
664    "source": [
665     "#plotting function\n",
666     "def multi_step_plot(history, true_future, prediction):\n",
667     "  plt.figure(figsize=(12, 6))\n",
668     "  num_in = create_time_steps(len(history))\n",
669     "  num_out = len(true_future)\n",
670     "  plt.grid()\n",
671     "  plt.plot(num_in, np.array(history[:, 1]), label='History')\n",
672     "  plt.plot(np.arange(num_out)/STEP, np.array(true_future), 'bo',\n",
673     "           label='True Future')\n",
674     "  if prediction.any():\n",
675     "    plt.plot(np.arange(num_out)/STEP, np.array(prediction), 'ro',\n",
676     "             label='Predicted Future')\n",
677     "  plt.legend(loc='upper left')\n",
678     "  plt.show()\n",
679     "  \n",
680     "\n",
681     "\n",
682     "for x, y in train_data_multi.take(1):\n",
683     "  multi_step_plot(x[0], y[0], np.array([0]))"
684    ]
685   },
686   {
687    "cell_type": "code",
688    "execution_count": null,
689    "metadata": {
690     "colab": {
691      "base_uri": "https://localhost:8080/"
692     },
693     "id": "snN_Flr5DWQN",
694     "outputId": "8b881aff-b91b-4394-bc3d-e52261e546db"
695    },
696    "outputs": [],
697    "source": [
698     "multi_step_model = tf.keras.models.Sequential()\n",
699     "multi_step_model.add(tf.keras.layers.LSTM(32,\n",
700     "                                          return_sequences=True,\n",
701     "                                          input_shape=x_train_multi.shape[-2:]))\n",
702     "multi_step_model.add(tf.keras.layers.Dropout(0.2))\n",
703     "multi_step_model.add(tf.keras.layers.LSTM(units=100,return_sequences=False))\n",
704     "multi_step_model.add(tf.keras.layers.Dropout(0.2))\n",
705     "#model.add(Dense(units=1, activation='relu'))\n",
706     "multi_step_model.add(tf.keras.layers.Activation(\"relu\"))\n",
707     "#aDD dropout layer (0.3)\n",
708     "multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs\n",
709     "\n",
710     "multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0), loss='mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])\n",
711     "\n",
712     "multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,\n",
713     "                                          steps_per_epoch=steps,\n",
714     "                                          validation_data=val_data_multi,\n",
715     "                                          validation_steps=50)"
716    ]
717   },
718   {
719    "cell_type": "code",
720    "execution_count": null,
721    "metadata": {
722     "colab": {
723      "base_uri": "https://localhost:8080/"
724     },
725     "id": "Ay5m27doDsTt",
726     "outputId": "70f65cf2-a2fd-4a1e-b9e0-049430410a1d"
727    },
728    "outputs": [],
729    "source": [
730     "plot_loss(multi_step_history, 'Multi-Step Training and validation loss')"
731    ]
732   },
733   {
734    "cell_type": "code",
735    "execution_count": null,
736    "metadata": {
737     "colab": {
738      "base_uri": "https://localhost:8080/",
739      "height": 1000
740     },
741     "id": "6ZFP49W4D2wp",
742     "outputId": "f2d25889-8aa1-4405-d607-1ed72a0df675"
743    },
744    "outputs": [],
745    "source": [
746     "for x, y in val_data_multi.take(5):\n",
747     "  multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])"
748    ]
749   },
750   {
751    "cell_type": "code",
752    "execution_count": null,
753    "metadata": {
754     "colab": {
755      "base_uri": "https://localhost:8080/"
756     },
757     "id": "DNKMjVoAVqZP",
758     "outputId": "d853fe0e-6ca4-411f-aa83-248f61309efe"
759    },
760    "outputs": [],
761    "source": [
762     "scores = multi_step_model.evaluate(x_train_multi, y_train_multi, verbose=1, batch_size=200)\n",
763     "print('MAE: {}'.format(scores[1]))"
764    ]
765   },
766   {
767    "cell_type": "code",
768    "execution_count": null,
769    "metadata": {
770     "colab": {
771      "base_uri": "https://localhost:8080/"
772     },
773     "id": "YXcsNZ8yu9Ay",
774     "outputId": "0b7ee485-e356-4c5b-8a9c-257d8803d189"
775    },
776    "outputs": [],
777    "source": [
778     "scores_test = multi_step_model.evaluate(x_val_multi, y_val_multi, verbose=1, batch_size=200)\n",
779     "print('MAE: {}'.format(scores_test[1]))\n"
780    ]
781   },
782   {
783    "cell_type": "code",
784    "execution_count": null,
785    "metadata": {
786     "colab": {
787      "base_uri": "https://localhost:8080/"
788     },
789     "id": "uCFgbZEOvZ9A",
790     "outputId": "46302cac-3d92-4970-c100-e5c3a13847f0"
791    },
792    "outputs": [],
793    "source": [
794     "y_pred_test = multi_step_model.predict(x_val_multi, verbose=0)\n",
795     "\n",
796     "plt.figure(figsize=(10,5))\n",
797     "plt.plot(y_pred_test)\n",
798     "plt.plot(y_val_multi)\n",
799     "plt.ylabel(\"RUL\")\n",
800     "plt.xlabel(\"Unit Number\")\n",
801     "plt.legend(loc='upper left')\n",
802     "plt.show()"
803    ]
804   }
805  ],
806  "metadata": {
807   "accelerator": "GPU",
808   "colab": {
809    "collapsed_sections": [],
810    "name": "stacked-LSTM_Correlation.ipynb",
811    "provenance": []
812   },
813   "kernelspec": {
814    "display_name": "Python 3 (ipykernel)",
815    "language": "python",
816    "name": "python3"
817   },
818   "language_info": {
819    "codemirror_mode": {
820     "name": "ipython",
821     "version": 3
822    },
823    "file_extension": ".py",
824    "mimetype": "text/x-python",
825    "name": "python",
826    "nbconvert_exporter": "python",
827    "pygments_lexer": "ipython3",
828    "version": "3.9.7"
829   }
830  },
831  "nbformat": 4,
832  "nbformat_minor": 1
833 }