@@ -8,7 +8,9 @@
 # because dataTrain is not stacked before create_sequences, so
 # the sets are not aligned in time
 
-# Optimizxation of threshold factor changed, bacause is based on F1-Score AKI
+# Because in this case we do not look at transients, we keep each train set independent
+
+# Optimization of the threshold factor changed, because it is based on the F1-score
 
 import pandas as pd
 import matplotlib.pyplot as plt
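The two comments above motivate the main change in this patch: if the per-failure sets are stacked before windowing, create_sequences produces windows that straddle set boundaries and mix unrelated regimes. A minimal sketch of that effect, assuming create_sequences is the usual sliding-window helper (an assumption; the real helper is defined elsewhere in this script):

import numpy as np

def create_sequences(values, time_steps):
    # one window per start position: a series of N samples yields N - time_steps windows
    return np.stack([values[i:i + time_steps] for i in range(len(values) - time_steps)])

a = np.zeros((100, 3))   # hypothetical set 1
b = np.ones((80, 3))     # hypothetical set 2

mixed    = create_sequences(np.vstack((a, b)), 10)   # 170 windows, some straddle the a/b boundary
separate = np.vstack((create_sequences(a, 10),
                      create_sequences(b, 10)))      # 90 + 70 = 160 windows, none mixing the sets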
@@ -69,6 +71,7 @@ datafiles[4]=['2024-12-28_5_','2024-12-29_5_','2024-12-30_5_','2024-12-31_5_','2
 
 features=['r1 s1','r1 s4','r1 s5','pa1 apiii']
 features=['r1 s1','r1 s4','r1 s5']
+features=['r1 s1','r1 s5']
 featureNames={}
 featureNames['r1 s1']='$T_{evap}$'
 featureNames['r1 s4']='$T_{cond}$'
@@ -185,7 +188,6 @@ def anomalyMetric(th,ts,testList): # first of list is non failure data
     # Precision: Rate of positive results: TP/(TP+FP)
     # F1-score: predictive performance measure: 2*Precision*Sensitivity/(Precision+Sensitivity)
     # F2-score: predictive performance measure: 2*Specificity*Sensitivity/(Specificity+Sensitivity)
-
     x_test = create_sequences(testList[0],ts)
     x_test_pred = model.predict(x_test)
     test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
@@ -214,31 +216,28 @@ def anomalyMetric(th,ts,testList): # first of list is non failure data
         FN[i-1] = anomalies.shape[0]-count
         Sensitivity[i-1]=TP[i-1]/(TP[i-1]+FN[i-1])
         Precision[i-1]=TP[i-1]/(TP[i-1]+FP)
-
     GlobalSensitivity=TP.sum()/(TP.sum()+FN.sum())
     Specificity=TN/(TN+FP)
     Accuracy=(TN+TP.sum())/(TN+TP.sum()+FP+FN.sum())
     GlobalPrecision=TP.sum()/(TP.sum()+FP)
     F1Score= 2*GlobalPrecision*GlobalSensitivity/(GlobalPrecision+GlobalSensitivity)
     F2Score = 2*Specificity*GlobalSensitivity/(Specificity+GlobalSensitivity)
-
-    print("Sensitivity: ",Sensitivity)
+    print("Global Precision: ",GlobalPrecision)
+    print("Precision: ",Precision)
     print("Global Sensitivity: ",GlobalSensitivity)
-    #print("Precision: ",Precision)
-    #print("Global Precision: ",GlobalPrecision)
-    print("Specifity: ",Specificity)
+    print("Sensitivity: ",Sensitivity)
+    #print("Specificity: ",Specificity)
     #print("Accuracy: ",Accuracy)
-    #print("F1Score: ",F1Score)
-    print("F2Score: ",F2Score)
+    print("F1Score: ",F1Score)
+    #print("F2Score: ",F2Score)
     #print("FP: ",FP)
     #return Sensitivity+Specificity
-    return F2Score
+    return F1Score
 
 FScoreHash={}
 threshold={}
 def getFScore(timestep,datalist):
     FScoreHash[timestep]=[]
-    # plots FSCore as a function of Threshold Factor
     tf=0.3
     while tf<8:
         th=threshold[timestep]*tf
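anomalyMetric now returns F1Score, the harmonic mean of global precision and global sensitivity, instead of the specificity/sensitivity harmonic mean it labels F2Score. A worked sketch with made-up counts (hypothetical values, only to make the two formulas concrete):

import numpy as np

TP = np.array([40, 35, 30, 45])   # true positives per failure type
FN = np.array([10, 15, 20, 5])    # false negatives per failure type
FP, TN = 8, 92                    # counted on the non-failure set

GlobalSensitivity = TP.sum()/(TP.sum()+FN.sum())   # 150/200 = 0.75
GlobalPrecision   = TP.sum()/(TP.sum()+FP)         # 150/158 ~ 0.949
Specificity       = TN/(TN+FP)                     # 92/100 = 0.92

F1Score = 2*GlobalPrecision*GlobalSensitivity/(GlobalPrecision+GlobalSensitivity)  # ~0.838
F2Score = 2*Specificity*GlobalSensitivity/(Specificity+GlobalSensitivity)          # ~0.826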
@@ -257,7 +256,7 @@ def plotFScore(FS):
         ar=np.array((FS[k]))
         axes.plot(ar[:,0],ar[:,1],label="$ns=$"+str(k),linewidth=3)
     axes.set_xlabel("Threshold factor ($tf$)")
-    axes.set_ylabel("FScore")
+    axes.set_ylabel("F1-Score")
     axes.legend()
     axes.grid()
     s='['
@@ -319,7 +318,7 @@ if options.train: # Train not needed to be changed
     )
     model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
     model.summary()
-    path_checkpoint="model_noclass_v2_"+str(timesteps)+listToString(features)+"_checkpoint.weights.h5"
+    path_checkpoint="model_noclass_v4_"+str(timesteps)+listToString(features)+"_checkpoint.weights.h5"
     es_callback=keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=15)
     modelckpt_callback=keras.callbacks.ModelCheckpoint( monitor="val_loss", filepath=path_checkpoint, verbose=1, save_weights_only=True, save_best_only=True,)
 
@@ -328,12 +327,12 @@ if options.train: # Train not needed to be changed
     x_train_pred=model.predict(x_train[0])
     train_mae_loss=np.mean(np.abs(x_train_pred - x_train[0]), axis=1)
     threshold[timesteps]=np.max(train_mae_loss,axis=0)
-    file = open('threshold'+listToString(features)+'.pk', 'wb')
+    file = open('threshold_v4_'+listToString(features)+'.pk', 'wb')
     pickle.dump(threshold, file)
     file.close()
     exit(0)
 else:
-    file = open('threshold'+listToString(features)+'.pk', 'rb')
+    file = open('threshold_v4_'+listToString(features)+'.pk', 'rb')
     threshold=pickle.load(file)
     file.close()
 
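Note that threshold[timesteps] is a vector, not a scalar: averaging the absolute error over the time axis leaves one MAE per window and per feature, and the max over windows keeps one threshold per feature. A shape sketch with made-up sizes:

import numpy as np

n_seq, T, F = 500, 8, 2                                    # hypothetical sizes
x_train0 = np.random.rand(n_seq, T, F)                     # stand-in for x_train[0]
x_pred   = x_train0 + 0.01*np.random.randn(n_seq, T, F)    # stand-in for model.predict

train_mae_loss = np.mean(np.abs(x_pred - x_train0), axis=1)   # shape (n_seq, F)
th = np.max(train_mae_loss, axis=0)                           # shape (F,): one per feature
assert th.shape == (F,)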
@@ -384,23 +383,23 @@ else:
 
 if options.optimizetf:
     for timesteps in range(4,21,4):
-        path_checkpoint="model_noclass_v2_"+str(timesteps)+listToString(features)+"_checkpoint.weights.h5"
+        path_checkpoint="model_noclass_v4_"+str(timesteps)+listToString(features)+"_checkpoint.weights.h5"
         es_callback=keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=15)
         modelckpt_callback=keras.callbacks.ModelCheckpoint( monitor="val_loss", filepath=path_checkpoint, verbose=1, save_weights_only=True, save_best_only=True,)
         model.load_weights(path_checkpoint)
         getFScore(timesteps,[dataTestNorm[0],dataTrainNorm[1],dataTrainNorm[2],dataTrainNorm[3],dataTrainNorm[4]])
-    file = open('FScore'+listToString(features)+'.pk', 'wb')
+    file = open('FScore_v4_'+listToString(features)+'.pk', 'wb')
     pickle.dump(FScoreHash, file)
     file.close()
 
 
-path_checkpoint="model_noclass_v2_"+str(options.timesteps)+listToString(features)+"_checkpoint.weights.h5"
+path_checkpoint="model_noclass_v4_"+str(options.timesteps)+listToString(features)+"_checkpoint.weights.h5"
 es_callback=keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=15)
 modelckpt_callback=keras.callbacks.ModelCheckpoint( monitor="val_loss", filepath=path_checkpoint, verbose=1, save_weights_only=True, save_best_only=True,)
 model.load_weights(path_checkpoint)
 
 
-file = open('FScore'+listToString(features)+'.pk', 'rb')
+file = open('FScore_v4_'+listToString(features)+'.pk', 'rb')
 FS=pickle.load(file)
 file.close()
 
@@ -415,20 +414,21 @@ TIME_STEPS=int(options.timesteps)
 # For failure data, we can use Train data because it was not used for training and includes the first samples
 #datalist=[dataTestNorm[0],dataTrainNorm[1],dataTrainNorm[2],dataTrainNorm[3],dataTrainNorm[4]]
 datalist=[dataTestNorm[0],dataTestNorm[1],dataTestNorm[2],dataTestNorm[3],dataTestNorm[4]]
-d=np.vstack((datalist))
 
-x_test = create_sequences(d,int(options.timesteps))
+
+x_test=create_sequences(datalist[0],int(options.timesteps))
+for i in range(1,len(datalist)):
+    x_test=np.vstack((x_test,create_sequences(datalist[i],int(options.timesteps))))
 x_test_pred = model.predict(x_test)
 test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
 
 
 # Define ranges for plotting in different colors
 testRanges=[]
-
 r=0
 for i in range(len(datalist)):
-    testRanges.append([r,r+datalist[i].shape[0]])
-    r+=datalist[i].shape[0]
+    testRanges.append([r,r+datalist[i].shape[0]-int(options.timesteps)])
+    r+=datalist[i].shape[0]-int(options.timesteps)
 
 #r=dataTestNorm[0].shape[0]
 #testRanges.append([0,r])
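Each block of x_test now holds datalist[i].shape[0] - timesteps windows (see the windowing sketch near the top), so every plotting range must shrink by timesteps rather than trimming only the last one. A quick check of the bookkeeping with hypothetical lengths:

T = 8                                  # e.g. int(options.timesteps)
lengths = [1000, 400, 400, 400, 400]   # hypothetical datalist shapes

testRanges, r = [], 0
for n in lengths:
    testRanges.append([r, r + n - T])
    r += n - T
assert r == sum(n - T for n in lengths)   # total number of rows in x_test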
@@ -437,8 +437,6 @@ for i in range(len(datalist)):
 #    testRanges.append([r,rnext] )
 #    r=rnext
 
-# Drop the last TIME_STEPS for plotting
-testRanges[NumberOfFailures][1]=testRanges[NumberOfFailures][1]-TIME_STEPS
 
 
 anomalies = test_mae_loss > threshold[int(options.timesteps)]*float(options.TF)
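test_mae_loss has one column per feature, so the comparison above yields a boolean matrix, and a window counts as anomalous when any feature exceeds its scaled threshold (mirroring the AtLeastOneTrue(...) test used elsewhere in this script). Sketch with made-up numbers:

import numpy as np

test_mae_loss = np.array([[0.1, 0.2],
                          [0.9, 0.1],
                          [0.2, 0.8]])
th = np.array([0.5, 0.6])          # per-feature thresholds
tf = 1.0                           # threshold factor (options.TF)

anomalies = test_mae_loss > th*tf
anomalous_data_indices = [i for i in range(anomalies.shape[0]) if anomalies[i].any()]
print(anomalous_data_indices)      # [1, 2]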
@@ -470,12 +468,11 @@ def plotData3():
         x=[]
         y=[]
         for k in anomalous_data_indices:
-            if (k+TIME_STEPS)<x_test.shape[0]:
-                x.append(k+TIME_STEPS)
-                y.append(x_test[k+TIME_STEPS,0,indexesToPlot[i]]*stdevs[i]+means[i])
+            if (k)<x_test.shape[0]:
+                x.append(k)
+                y.append(x_test[k,0,indexesToPlot[i]]*stdevs[i]+means[i])
         axes[i].plot(x,y ,color='black',marker='.',linewidth=0,label="Fail detection" )
 
-
         init=0
         end=testRanges[0][1]
         axes[i].plot(range(init,end),x_test[testRanges[0][0]:testRanges[0][1],0,indexesToPlot[i]]*stdevs[i]+means[i],label="No fail")
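Dropping the +TIME_STEPS offset is consistent with the new per-set windowing: the plotted curves draw the first sample of each window, so window k already sits at x position k and the anomaly markers line up without a shift. A before/after sketch of the mapping (hypothetical indices):

TIME_STEPS = 8
anomalous_data_indices = [3, 120, 457]                    # hypothetical flagged windows
old_x = [k + TIME_STEPS for k in anomalous_data_indices]  # previous placement: [11, 128, 465]
new_x = list(anomalous_data_indices)                      # placement after this patch: [3, 120, 457]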
@@ -487,7 +484,6 @@ def plotData3():
                 init=end
                 end+=(testRanges[j+1][1]-testRanges[j+1][0])
 
-
     if i==(NumFeatures-1):
         axes[i].legend(loc='right')
     s=''
@@ -505,199 +501,6 @@ anomalyMetric(threshold[int(options.timesteps)]*float(options.TF), int(options.t
 plotData3()
 
 
-def plotData5():
-    model1 = keras.Sequential(
-        [
-            layers.Input(shape=(4, 3)),
-            layers.Conv1D(
-                filters=NumFilters,
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Dropout(rate=DropOut),
-            layers.Conv1D(
-                filters=int(NumFilters/2),
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Conv1DTranspose(
-                filters=int(NumFilters/2),
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Dropout(rate=DropOut),
-            layers.Conv1DTranspose(
-                filters=NumFilters,
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Conv1DTranspose(filters=3, kernel_size=KernelSize, padding="same"),
-        ]
-    )
-    model1.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
-    model1.summary()
-    path_checkpoint="model_noclass_v2_"+str(4)+listToString(features)+"_checkpoint.weights.h5"
-    es_callback=keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=15)
-    modelckpt_callback=keras.callbacks.ModelCheckpoint( monitor="val_loss", filepath=path_checkpoint, verbose=1, save_weights_only=True, save_best_only=True,)
-    model1.load_weights(path_checkpoint)
-
-    model2 = keras.Sequential(
-        [
-            layers.Input(shape=(20, 3)),
-            layers.Conv1D(
-                filters=NumFilters,
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Dropout(rate=DropOut),
-            layers.Conv1D(
-                filters=int(NumFilters/2),
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Conv1DTranspose(
-                filters=int(NumFilters/2),
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Dropout(rate=DropOut),
-            layers.Conv1DTranspose(
-                filters=NumFilters,
-                kernel_size=KernelSize,
-                padding="same",
-                strides=2,
-                activation="relu",
-            ),
-            layers.Conv1DTranspose(filters=3, kernel_size=KernelSize, padding="same"),
-        ]
-    )
-    model2.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
-    model2.summary()
-    path_checkpoint="model_noclass_v2_"+str(20)+listToString(features)+"_checkpoint.weights.h5"
-    es_callback=keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=15)
-    modelckpt_callback=keras.callbacks.ModelCheckpoint( monitor="val_loss", filepath=path_checkpoint, verbose=1, save_weights_only=True, save_best_only=True,)
-    model2.load_weights(path_checkpoint)
-
-    datalist=[dataTestNorm[0],dataTestNorm[3],dataTestNorm[2],dataTestNorm[1],dataTestNorm[4]]
-    d=np.vstack((datalist))
-    x_test = create_sequences(d,4)
-    x_test_pred = model1.predict(x_test)
-    test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
-    testRanges=[]
-    TIME_STEPS=4
-    r=0
-    for i in range(len(datalist)):
-        testRanges.append([r,r+datalist[i].shape[0]])
-        r+=datalist[i].shape[0]
-    testRanges[NumberOfFailures][1]=testRanges[NumberOfFailures][1]-TIME_STEPS
-    anomalies = test_mae_loss > threshold[4]*float(options.TF)
-    anomalous_data_indices = []
-    for i in range(anomalies.shape[0]):
-        if AtLeastOneTrue(anomalies[i]):
-            anomalous_data_indices.append(i)
-
-    plt.rcParams.update({'font.size': 16})
-    fig, axes = plt.subplots(
-        nrows=2, ncols=1, figsize=(15, 7), dpi=80, facecolor="w", edgecolor="k" , sharex=True
-    )
-    for i in range(1):
-        init=0
-        end=testRanges[0][1]
-        axes[i].plot(range(init,end),x_test[testRanges[0][0]:testRanges[0][1],0,indexesToPlot[i]]*stdevs[i]+means[i],label="No fail")
-        init=end
-        end+=(testRanges[1][1]-testRanges[1][0])
-        for j in range(1,NumberOfFailures+1):
-            axes[i].plot(range(init,end),x_test[testRanges[j][0]:testRanges[j][1],0,indexesToPlot[i]]*stdevs[i]+means[i],label="Fail type "+str(j), color=colorline[j-1])
-            if j<NumberOfFailures:
-                init=end
-                end+=(testRanges[j+1][1]-testRanges[j+1][0])
-        x=[]
-        y=[]
-        for k in anomalous_data_indices:
-            if (k+TIME_STEPS)<x_test.shape[0]:
-                x.append(k+TIME_STEPS)
-                y.append(x_test[k+TIME_STEPS,0,indexesToPlot[i]]*stdevs[i]+means[i])
-        axes[i].plot(x,y ,color='black',marker='.',linewidth=0,label="Fail detection" )
-
-        if i==(NumFeatures-1):
-            axes[i].legend(loc='right')
-        s=''
-        s+=featureNames[features[indexesToPlot[i]]]
-        s+=' '+unitNames[features[indexesToPlot[i]]]
-        axes[i].set_ylabel(s)
-        axes[i].grid()
-
-
-    x_test = create_sequences(d,20)
-    x_test_pred = model2.predict(x_test)
-    test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
-    testRanges=[]
-    r=0
-    TIME_STEPS=20
-    for i in range(len(datalist)):
-        testRanges.append([r,r+datalist[i].shape[0]])
-        r+=datalist[i].shape[0]
-    testRanges[NumberOfFailures][1]=testRanges[NumberOfFailures][1]-TIME_STEPS
-    anomalies = test_mae_loss > threshold[20]*float(options.TF)
-    anomalous_data_indices = []
-    for i in range(anomalies.shape[0]):
-        if AtLeastOneTrue(anomalies[i]):
-            anomalous_data_indices.append(i)
-    print(testRanges)
-    for i in range(1):
-        init=0
-        end=testRanges[0][1]
-        axes[i+1].plot(range(init,end),x_test[testRanges[0][0]:testRanges[0][1],0,indexesToPlot[i]]*stdevs[i]+means[i],label="No fail")
-        init=end
-        end+=(testRanges[1][1]-testRanges[1][0])
-        for j in range(1,NumberOfFailures+1):
-            if j==1:
-                axes[i+1].plot(range(init,end),x_test[testRanges[j][0]:testRanges[j][1],0,indexesToPlot[i]]*stdevs[i]+means[i],label="Fail type 3", color=colorline[j-1])
-            else:
-                axes[i+1].plot(range(init,end),x_test[testRanges[j][0]:testRanges[j][1],0,indexesToPlot[i]]*stdevs[i]+means[i], color=colorline[j-1])
-            if j<NumberOfFailures:
-                init=end
-                end+=(testRanges[j+1][1]-testRanges[j+1][0])
-        x=[]
-        y=[]
-        for k in anomalous_data_indices:
-            if (k+TIME_STEPS)<x_test.shape[0]:
-                x.append(k+TIME_STEPS)
-                y.append(x_test[k+TIME_STEPS,0,indexesToPlot[i]]*stdevs[i]+means[i])
-        axes[i+1].plot(x,y ,color='black',marker='.',linewidth=0,label="Fail detection" )
-        if i==0:
-            axes[i+1].legend(loc='right')
-        s=''
-        s+=featureNames[features[indexesToPlot[i]]]
-        s+=' '+unitNames[features[indexesToPlot[i]]]
-        axes[i+1].set_ylabel(s)
-        axes[i+1].grid()
-
-    axes[0].set_xlim(460,480)
-    axes[1].set_xlim(460,480)
-
-    axes[0].set_title('$ns=4$')
-    axes[1].set_title('$ns=20$')
-    axes[1].set_xlabel("Sample number")
-    plt.show()
-
-
-
-plotData5()
 exit(0)
 
 