[MODELS] Cleanup Jupyter Notebooks.
[thoth.git] / models / failure_prediction / jnotebooks / CNN.ipynb
1 {
2  "cells": [
3   {
4    "cell_type": "markdown",
5    "metadata": {
6     "id": "ywp6YMCFQKEQ"
7    },
8    "source": [
9     "Contributors: **Rohit Singh Rathaur, Girish L.** \n",
10     "\n",
11     "Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]\n",
12     "\n",
13     "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
14     "you may not use this file except in compliance with the License.\n",
15     "You may obtain a copy of the License at\n",
16     "\n",
17     "    http://www.apache.org/licenses/LICENSE-2.0\n",
18     "\n",
19     "Unless required by applicable law or agreed to in writing, software\n",
20     "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
21     "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
22     "See the License for the specific language governing permissions and\n",
23     "limitations under the License."
24    ]
25   },
26   {
27    "cell_type": "code",
28    "execution_count": null,
29    "metadata": {
30     "colab": {
31      "base_uri": "https://localhost:8080/"
32     },
33     "id": "YQ6lT1e2hrx4",
34     "outputId": "21d3bdb5-0ac2-44a6-90ec-a3f2677014c4"
35    },
36    "outputs": [],
37    "source": [
38     "from google.colab import drive\n",
39     "drive.mount('/content/drive')"
40    ]
41   },
42   {
43    "cell_type": "code",
44    "execution_count": null,
45    "metadata": {
46     "id": "tLhroy5BnMnC"
47    },
48    "outputs": [],
49    "source": [
50     "# Importing libraries\n",
51     "import tensorflow as tf\n",
52     "import matplotlib.pyplot as plt\n",
53     "import matplotlib as mpl\n",
54     "import pandas as pd\n",
55     "import numpy as np\n",
56     "import os"
57    ]
58   },
59   {
60    "cell_type": "code",
61    "execution_count": null,
62    "metadata": {
63     "colab": {
64      "base_uri": "https://localhost:8080/",
65      "height": 419
66     },
67     "id": "2-UpMVsSnfCI",
68     "outputId": "b6f5691d-1e76-43a4-de55-3156dbc02b4d"
69    },
70    "outputs": [],
71    "source": [
72     "df_Ellis  = pd.read_csv(\"/content/drive/MyDrive/Failure/lstm/Ellis_FinalTwoConditionwithOR.csv\")\n",
73     "df_Ellis"
74    ]
75   },
76   {
77    "cell_type": "code",
78    "execution_count": null,
79    "metadata": {
80     "colab": {
81      "base_uri": "https://localhost:8080/",
82      "height": 293
83     },
84     "id": "92xBt43BnjAo",
85     "outputId": "ed2f1595-c32c-43eb-ae2f-b5fce96e9a92"
86    },
87    "outputs": [],
88    "source": [
89     "df_Ellis.plot()"
90    ]
91   },
92   {
93    "cell_type": "code",
94    "execution_count": null,
95    "metadata": {
96     "colab": {
97      "base_uri": "https://localhost:8080/",
98      "height": 879
99     },
100     "id": "RSo-aa-SIoBR",
101     "outputId": "63f0c61a-8a5a-41af-cbec-42fe0625b4d1"
102    },
103    "outputs": [],
104    "source": [
105     "# we show here the hist\n",
106     "df_Ellis.hist(bins=100,figsize=(20,15))\n",
107     "#save_fig(\"attribute_histogram_plots\")\n",
108     "plt.show()"
109    ]
110   },
111   {
112    "cell_type": "code",
113    "execution_count": null,
114    "metadata": {
115     "colab": {
116      "base_uri": "https://localhost:8080/",
117      "height": 634
118     },
119     "id": "gggaMJ_2LtFs",
120     "outputId": "31ad3663-dfe7-4ac5-ddc1-bb5bd6e5bdfd"
121    },
122    "outputs": [],
123    "source": [
124     "cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']] \n",
125     "cpu_system_perc.rolling(12).mean().plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
126     "plt.xlabel('Timestamp', fontsize=30);"
127    ]
128   },
129   {
130    "cell_type": "code",
131    "execution_count": null,
132    "metadata": {
133     "colab": {
134      "base_uri": "https://localhost:8080/",
135      "height": 634
136     },
137     "id": "R_ctvXcQL1Xf",
138     "outputId": "dbc14537-5eb4-4433-f71b-8ad22f551fe8"
139    },
140    "outputs": [],
141    "source": [
142     "load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']] \n",
143     "load_avg_1_min.rolling(12).mean().plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
144     "plt.xlabel('Timestamp', fontsize=30);"
145    ]
146   },
147   {
148    "cell_type": "code",
149    "execution_count": null,
150    "metadata": {
151     "colab": {
152      "base_uri": "https://localhost:8080/",
153      "height": 634
154     },
155     "id": "Gkd5ecCmL6Bw",
156     "outputId": "af90f36e-850f-4ae6-f350-c7a55c2db05b"
157    },
158    "outputs": [],
159    "source": [
160     "cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']] \n",
161     "cpu_wait_perc.rolling(12).mean().plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
162     "plt.xlabel('Year', fontsize=30);"
163    ]
164   },
165   {
166    "cell_type": "code",
167    "execution_count": null,
168    "metadata": {
169     "colab": {
170      "base_uri": "https://localhost:8080/",
171      "height": 624
172     },
173     "id": "EycZrQU0MBSX",
174     "outputId": "bcf199b4-b7f9-4a8c-ba2b-aff666038715"
175    },
176    "outputs": [],
177    "source": [
178     "df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(12).mean(),cpu_wait_perc.rolling(12).mean()], axis=1) \n",
179     "df_dg.plot(figsize=(20,10), linewidth=5, fontsize=20) \n",
180     "plt.xlabel('Year', fontsize=20); "
181    ]
182   },
183   {
184    "cell_type": "code",
185    "execution_count": null,
186    "metadata": {
187     "id": "YoQA_MIBMknS"
188    },
189    "outputs": [],
190    "source": []
191   },
192   {
193    "cell_type": "code",
194    "execution_count": null,
195    "metadata": {
196     "colab": {
197      "base_uri": "https://localhost:8080/",
198      "height": 710
199     },
200     "id": "Pi8UMMitMa3Q",
201     "outputId": "189c418a-6688-4f58-c7f4-1354e235add7"
202    },
203    "outputs": [],
204    "source": [
205     "# we establish the corrmartrice\n",
206     "import seaborn as sns\n",
207     "color = sns.color_palette()\n",
208     "sns.set_style('darkgrid')\n",
209     "\n",
210     "correaltionMatrice = df_Ellis.corr()\n",
211     "f, ax = plt.subplots(figsize=(20, 10))\n",
212     "sns.heatmap(correaltionMatrice, cbar=True, vmin=0, vmax=1, square=True, annot=True);\n",
213     "plt.show()"
214    ]
215   },
216   {
217    "cell_type": "code",
218    "execution_count": null,
219    "metadata": {
220     "colab": {
221      "base_uri": "https://localhost:8080/"
222     },
223     "id": "rkYwyKtXMvpy",
224     "outputId": "708e7b7c-10ce-4d4a-ebbb-894567d167c9"
225    },
226    "outputs": [],
227    "source": [
228     "df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])"
229    ]
230   },
231   {
232    "cell_type": "code",
233    "execution_count": null,
234    "metadata": {
235     "colab": {
236      "base_uri": "https://localhost:8080/",
237      "height": 235
238     },
239     "id": "5oQK-ddinvCM",
240     "outputId": "92bf698d-e2bd-4438-bd84-346da3cfa696"
241    },
242    "outputs": [],
243    "source": [
244     "## ## using multivariate feature \n",
245     "\n",
246     "features_3 = ['ellis-cpu.wait_perc', 'ellis-load.avg_1_min', 'ellis-net.in_bytes_sec', 'Label']\n",
247     "\n",
248     "features = df_Ellis[features_3]\n",
249     "features.index = df_Ellis['Timestamp']\n",
250     "features.head()"
251    ]
252   },
253   {
254    "cell_type": "code",
255    "execution_count": null,
256    "metadata": {
257     "colab": {
258      "base_uri": "https://localhost:8080/",
259      "height": 386
260     },
261     "id": "qbqn755fo81g",
262     "outputId": "91c28242-4c9a-4ce3-9649-501eafa9247e"
263    },
264    "outputs": [],
265    "source": [
266     "features.plot(subplots=True)"
267    ]
268   },
269   {
270    "cell_type": "code",
271    "execution_count": null,
272    "metadata": {
273     "id": "jJQD1x9psWCH"
274    },
275    "outputs": [],
276    "source": [
277     "features = features.values"
278    ]
279   },
280   {
281    "cell_type": "code",
282    "execution_count": null,
283    "metadata": {
284     "colab": {
285      "base_uri": "https://localhost:8080/"
286     },
287     "id": "xf8WCiykpUzN",
288     "outputId": "c34ed8fa-edd0-41d0-e041-29a0853a3370"
289    },
290    "outputs": [],
291    "source": [
292     "### standardize data\n",
293     "train_split = 141600\n",
294     "tf.random.set_seed(13)\n",
295     "\n",
296     "### standardize data\n",
297     "features_mean = features[:train_split].mean()\n",
298     "features_std = features[:train_split].std()\n",
299     "features  = (features - features_mean)/ features_std\n",
300     "\n",
301     "print(type(features))\n",
302     "print(features.shape)\n"
303    ]
304   },
305   {
306    "cell_type": "code",
307    "execution_count": null,
308    "metadata": {
309     "id": "1a0hNDmppnLB"
310    },
311    "outputs": [],
312    "source": [
313     "### create mutlivariate data\n",
314     "\n",
315     "def mutlivariate_data(features , target , start_idx , end_idx , history_size , target_size,\n",
316     "                      step ,  single_step = False):\n",
317     "  data = []\n",
318     "  labels = []\n",
319     "  start_idx = start_idx + history_size\n",
320     "  if end_idx is None:\n",
321     "    end_idx = len(features)- target_size\n",
322     "  for i in range(start_idx , end_idx ):\n",
323     "    idxs = range(i-history_size, i, step) ### using step\n",
324     "    data.append(features[idxs])\n",
325     "    if single_step:\n",
326     "      labels.append(target[i+target_size])\n",
327     "    else:\n",
328     "      labels.append(target[i:i+target_size])\n",
329     "\n",
330     "  return np.array(data) , np.array(labels)"
331    ]
332   },
333   {
334    "cell_type": "code",
335    "execution_count": null,
336    "metadata": {
337     "colab": {
338      "base_uri": "https://localhost:8080/"
339     },
340     "id": "Z0CivgkitfgE",
341     "outputId": "fd7abe27-940d-4f71-e581-5deb3ac42262"
342    },
343    "outputs": [],
344    "source": [
345     "### generate multivariate data\n",
346     "\n",
347     "history = 720\n",
348     "future_target = 72\n",
349     "STEP = 6\n",
350     "\n",
351     "x_train_ss , y_train_ss = mutlivariate_data(features , features[:, 1], 0, train_split, history,\n",
352     "                                            future_target, STEP , single_step = True)\n",
353     "\n",
354     "x_val_ss , y_val_ss = mutlivariate_data(features , features[:,1] , train_split , None , history ,\n",
355     "                                        future_target, STEP, single_step = True)\n",
356     "\n",
357     "print(x_train_ss.shape , y_train_ss.shape)\n",
358     "print(x_val_ss.shape , y_val_ss.shape)"
359    ]
360   },
361   {
362    "cell_type": "code",
363    "execution_count": null,
364    "metadata": {
365     "colab": {
366      "base_uri": "https://localhost:8080/"
367     },
368     "id": "VBdr2epGu3aq",
369     "outputId": "3c3a50d3-6f47-400e-edd0-18a46154f7fc"
370    },
371    "outputs": [],
372    "source": [
373     "## tensorflow dataset\n",
374     "batch_size = 256\n",
375     "buffer_size = 10000\n",
376     "\n",
377     "train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))\n",
378     "train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()\n",
379     "\n",
380     "val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))\n",
381     "val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()\n",
382     "\n",
383     "print(train_ss)\n",
384     "print(val_ss)"
385    ]
386   },
387   {
388    "cell_type": "code",
389    "execution_count": null,
390    "metadata": {
391     "id": "9eQpwUyGglu_"
392    },
393    "outputs": [],
394    "source": [
395     "def root_mean_squared_error(y_true, y_pred):\n",
396     "        return K.sqrt(K.mean(K.square(y_pred - y_true))) "
397    ]
398   },
399   {
400    "cell_type": "code",
401    "execution_count": null,
402    "metadata": {
403     "id": "1cKtTAzqyiyL"
404    },
405    "outputs": [],
406    "source": [
407     "from keras.layers import Activation, Dense, Dropout\n",
408     "from keras.utils.vis_utils import plot_model\n",
409     "from keras.layers import Flatten\n",
410     "from keras.layers.convolutional import Conv1D\n",
411     "from keras.layers.convolutional import MaxPooling1D\n",
412     "### Modelling using LSTM\n",
413     "steps = 50\n",
414     "\n",
415     "EPOCHS =20\n",
416     "\n",
417     "single_step_model = tf.keras.models.Sequential()\n",
418     "\n",
419     "single_step_model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(1,48))\n",
420     "single_step_model.add(MaxPooling1D(pool_size=2))\n",
421     "single_step_model.add(Flatten())\n",
422     "single_step_model.add(Dense(50, activation='relu'))\n",
423     "single_step_model.add(Dense(1))\n",
424     "single_step_model.compile(optimizer='adam', loss='mae', metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])\n",
425     "\n",
426     "\n",
427     "\n",
428     "#single_step_model.add(tf.keras.layers.LSTM(32, return_sequences=False, input_shape = x_train_ss.shape[-2:]))\n",
429     "#single_step_model.add(tf.keras.layers.Dropout(0.3))\n",
430     "#single_step_model.add(tf.keras.layers.Dense(1))\n",
431     "#single_step_model.compile(optimizer = tf.keras.optimizers.Adam(), loss = 'mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])\n",
432     "#single_step_model.compile(loss='mse', optimizer='rmsprop')\n",
433     "single_step_model_history = single_step_model.fit(train_ss, epochs = EPOCHS , \n",
434     "                                                  steps_per_epoch =steps, validation_data = val_ss,\n",
435     "                                                  validation_steps = 50)\n",
436     "single_step_model.summary()\n",
437     "plot_model(single_step_model, to_file='/content/drive/MyDrive/Failure/lstm/CNN-LSTM.png', show_shapes=True, show_layer_names=True)\n",
438     "\n"
439    ]
440   },
441   {
442    "cell_type": "code",
443    "execution_count": null,
444    "metadata": {
445     "colab": {
446      "base_uri": "https://localhost:8080/",
447      "height": 281
448     },
449     "id": "Pgev0dgzzBVx",
450     "outputId": "81b15b3b-0132-428e-d545-d689caa2e2eb"
451    },
452    "outputs": [],
453    "source": [
454     "## plot train test loss \n",
455     "\n",
456     "def plot_loss(history , title):\n",
457     "  loss = history.history['loss']\n",
458     "  val_loss = history.history['val_loss']\n",
459     "\n",
460     "  epochs = range(len(loss))\n",
461     "  plt.figure()\n",
462     "  plt.plot(epochs, loss , 'b' , label = 'Train Loss')\n",
463     "  plt.plot(epochs, val_loss , 'r' , label = 'Validation Loss')\n",
464     "  plt.title(title)\n",
465     "  plt.legend()\n",
466     "  plt.grid()\n",
467     "  plt.show()\n",
468     "\n",
469     "plot_loss(single_step_model_history , 'Single Step Training and validation loss')"
470    ]
471   },
472   {
473    "cell_type": "code",
474    "execution_count": null,
475    "metadata": {
476     "colab": {
477      "base_uri": "https://localhost:8080/",
478      "height": 281
479     },
480     "id": "EnYf6j4okEoC",
481     "outputId": "89bda316-e762-4aef-b32d-e7649da9ac39"
482    },
483    "outputs": [],
484    "source": [
485     "## plot train test loss \n",
486     "\n",
487     "def plot_loss(history , title):\n",
488     "  loss = history.history['rmse']\n",
489     "  val_loss = history.history['val_rmse']\n",
490     "\n",
491     "  epochs = range(len(loss))\n",
492     "  plt.figure()\n",
493     "  plt.plot(epochs, loss , 'b' , label = 'Train RMSE')\n",
494     "  plt.plot(epochs, val_loss , 'r' , label = 'Validation RMSE')\n",
495     "  plt.title(title)\n",
496     "  plt.legend()\n",
497     "  plt.grid()\n",
498     "  plt.show()\n",
499     "\n",
500     "plot_loss(single_step_model_history , 'Single Step Training and validation loss')"
501    ]
502   },
503   {
504    "cell_type": "code",
505    "execution_count": null,
506    "metadata": {
507     "id": "WMegV8mNAwe_"
508    },
509    "outputs": [],
510    "source": [
511     "### fucntion to create time steps\n",
512     "def create_time_steps(length):\n",
513     "  return list(range(-length,0))\n",
514     "\n",
515     "### function to plot time series data\n",
516     "\n",
517     "def plot_time_series(plot_data, delta , title):\n",
518     "  labels = [\"History\" , 'True Future' , 'Model Predcited']\n",
519     "  marker = ['.-' , 'rx' , 'go']\n",
520     "  time_steps = create_time_steps(plot_data[0].shape[0])\n",
521     "\n",
522     "  if delta:\n",
523     "    future = delta\n",
524     "  else:\n",
525     "    future = 0\n",
526     "  plt.title(title)\n",
527     "  for i , x in enumerate(plot_data):\n",
528     "    if i :\n",
529     "      plt.plot(future , plot_data[i] , marker[i], markersize = 10 , label = labels[i])\n",
530     "    else:\n",
531     "      plt.plot(time_steps, plot_data[i].flatten(), marker[i], label = labels[i])\n",
532     "  plt.legend()\n",
533     "  plt.xlim([time_steps[0], (future+5) *2])\n",
534     "\n",
535     "  plt.xlabel('Time_Step')\n",
536     "  return plt"
537    ]
538   },
539   {
540    "cell_type": "code",
541    "execution_count": null,
542    "metadata": {
543     "id": "q99i2c-9XKF3"
544    },
545    "outputs": [],
546    "source": [
547     "### Moving window average\n",
548     "\n",
549     "def MWA(history):\n",
550     "  return np.mean(history)"
551    ]
552   },
553   {
554    "cell_type": "code",
555    "execution_count": null,
556    "metadata": {
557     "colab": {
558      "base_uri": "https://localhost:8080/",
559      "height": 1000
560     },
561     "id": "xFJT1rZDAUVL",
562     "outputId": "a045ab0c-628b-456c-9ef6-5b92926358c6"
563    },
564    "outputs": [],
565    "source": [
566     "# plot time series and predicted values\n",
567     "\n",
568     "for x, y in val_ss.take(5):\n",
569     "  plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),\n",
570     "                    single_step_model.predict(x)[0]], 12,\n",
571     "                   'Single Step Prediction')\n",
572     "  plot.show()"
573    ]
574   },
575   {
576    "cell_type": "markdown",
577    "metadata": {
578     "id": "_KXWQVmyCSix"
579    },
580    "source": [
581     "# **MultiStep Forcasting**"
582    ]
583   },
584   {
585    "cell_type": "code",
586    "execution_count": null,
587    "metadata": {
588     "colab": {
589      "base_uri": "https://localhost:8080/"
590     },
591     "id": "Lu7m2Rr4AbMK",
592     "outputId": "8e070f0a-fc5e-47a3-e330-ea8c8803c2c8"
593    },
594    "outputs": [],
595    "source": [
596     "future_target = 72 # 72 future values\n",
597     "x_train_multi, y_train_multi = mutlivariate_data(features, features[:, 1], 0,\n",
598     "                                                 train_split, history,\n",
599     "                                                 future_target, STEP)\n",
600     "x_val_multi, y_val_multi = mutlivariate_data(features, features[:, 1],\n",
601     "                                             train_split, None, history,\n",
602     "                                             future_target, STEP)\n",
603     "\n",
604     "print(x_train_multi.shape)\n",
605     "print(y_train_multi.shape)"
606    ]
607   },
608   {
609    "cell_type": "code",
610    "execution_count": null,
611    "metadata": {
612     "id": "GLRv5D16CrHj"
613    },
614    "outputs": [],
615    "source": [
616     "#  TF DATASET\n",
617     "\n",
618     "train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi))\n",
619     "train_data_multi = train_data_multi.cache().shuffle(buffer_size).batch(batch_size).repeat()\n",
620     "\n",
621     "val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))\n",
622     "val_data_multi = val_data_multi.batch(batch_size).repeat()"
623    ]
624   },
625   {
626    "cell_type": "code",
627    "execution_count": null,
628    "metadata": {
629     "colab": {
630      "base_uri": "https://localhost:8080/"
631     },
632     "id": "fjXexah9C8yg",
633     "outputId": "ed05331e-fdf2-460f-a2f5-4bac3be78bc8"
634    },
635    "outputs": [],
636    "source": [
637     "print(train_data_multi)\n",
638     "print(val_data_multi)"
639    ]
640   },
641   {
642    "cell_type": "code",
643    "execution_count": null,
644    "metadata": {
645     "colab": {
646      "base_uri": "https://localhost:8080/",
647      "height": 385
648     },
649     "id": "7mtLZ6S-DPU-",
650     "outputId": "02416632-1109-425e-af81-0f17a79e3120"
651    },
652    "outputs": [],
653    "source": [
654     "#plotting function\n",
655     "def multi_step_plot(history, true_future, prediction):\n",
656     "  plt.figure(figsize=(12, 6))\n",
657     "  num_in = create_time_steps(len(history))\n",
658     "  num_out = len(true_future)\n",
659     "  plt.grid()\n",
660     "  plt.plot(num_in, np.array(history[:, 1]), label='History')\n",
661     "  plt.plot(np.arange(num_out)/STEP, np.array(true_future), 'bo',\n",
662     "           label='True Future')\n",
663     "  if prediction.any():\n",
664     "    plt.plot(np.arange(num_out)/STEP, np.array(prediction), 'ro',\n",
665     "             label='Predicted Future')\n",
666     "  plt.legend(loc='upper left')\n",
667     "  plt.show()\n",
668     "  \n",
669     "\n",
670     "\n",
671     "for x, y in train_data_multi.take(1):\n",
672     "  multi_step_plot(x[0], y[0], np.array([0]))"
673    ]
674   },
675   {
676    "cell_type": "code",
677    "execution_count": null,
678    "metadata": {
679     "colab": {
680      "base_uri": "https://localhost:8080/"
681     },
682     "id": "snN_Flr5DWQN",
683     "outputId": "87a55e55-7cfd-4c10-9b15-db992065dd67"
684    },
685    "outputs": [],
686    "source": [
687     "multi_step_model = tf.keras.models.Sequential()\n",
688     "\n",
689     "\n",
690     "multi_step_model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=x_train_ss.shape[-2:]))\n",
691     "multi_step_model.add(MaxPooling1D(pool_size=2))\n",
692     "multi_step_model.add(Flatten())\n",
693     "multi_step_model.add(Dense(50, activation='relu'))\n",
694     "multi_step_model.add(Dense(1))\n",
695     "multi_step_model.compile(optimizer='adam', loss='mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])\n",
696     "\n",
697     "\n",
698     "#multi_step_model.add(tf.keras.layers.LSTM(32,\n",
699     " #                                         return_sequences=True,\n",
700     "  #                                        input_shape=x_train_multi.shape[-2:]))\n",
701     "#multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu'))\n",
702     "#aDD dropout layer (0.3)\n",
703     "#multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs\n",
704     "\n",
705     "#multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0), loss='mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])\n",
706     "\n",
707     "multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,\n",
708     "                                          steps_per_epoch=steps,\n",
709     "                                          validation_data=val_data_multi,\n",
710     "                                          validation_steps=50)"
711    ]
712   },
713   {
714    "cell_type": "code",
715    "execution_count": null,
716    "metadata": {
717     "colab": {
718      "base_uri": "https://localhost:8080/",
719      "height": 281
720     },
721     "id": "Ay5m27doDsTt",
722     "outputId": "9b8f5274-03f9-427f-84ce-6dfafc9e7b45"
723    },
724    "outputs": [],
725    "source": [
726     "plot_loss(multi_step_history, 'Multi-Step Training and validation loss')"
727    ]
728   },
729   {
730    "cell_type": "code",
731    "execution_count": null,
732    "metadata": {
733     "id": "6ZFP49W4D2wp"
734    },
735    "outputs": [],
736    "source": [
737     "for x, y in val_data_multi.take(5):\n",
738     "  multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])"
739    ]
740   },
741   {
742    "cell_type": "code",
743    "execution_count": null,
744    "metadata": {
745     "colab": {
746      "base_uri": "https://localhost:8080/"
747     },
748     "id": "DNKMjVoAVqZP",
749     "outputId": "403d8d4d-ca00-40d2-a179-f13879819f15"
750    },
751    "outputs": [],
752    "source": [
753     "scores = multi_step_model.evaluate(x_train_multi, y_train_multi, verbose=1, batch_size=200)\n",
754     "print('MAE: {}'.format(scores[1]))"
755    ]
756   },
757   {
758    "cell_type": "code",
759    "execution_count": null,
760    "metadata": {
761     "colab": {
762      "base_uri": "https://localhost:8080/"
763     },
764     "id": "oDXXSFLy07gH",
765     "outputId": "47b9dcf3-cc67-43d3-e88a-8b51ea85a993"
766    },
767    "outputs": [],
768    "source": [
769     "scores_test = multi_step_model.evaluate(x_val_multi, y_val_multi, verbose=1, batch_size=200)\n",
770     "print('MAE: {}'.format(scores[1]))\n"
771    ]
772   }
773  ],
774  "metadata": {
775   "colab": {
776    "collapsed_sections": [],
777    "name": "CNN.ipynb",
778    "provenance": []
779   },
780   "kernelspec": {
781    "display_name": "Python 3 (ipykernel)",
782    "language": "python",
783    "name": "python3"
784   },
785   "language_info": {
786    "codemirror_mode": {
787     "name": "ipython",
788     "version": 3
789    },
790    "file_extension": ".py",
791    "mimetype": "text/x-python",
792    "name": "python",
793    "nbconvert_exporter": "python",
794    "pygments_lexer": "ipython3",
795    "version": "3.9.7"
796   }
797  },
798  "nbformat": 4,
799  "nbformat_minor": 1
800 }