Csar Fdez, 4 days ago
parent commit ae4956b344
1 file changed, 414 insertions, 0 deletions

v2_unsupervised.py
# Csar Fdez, UdL, 2025
# Unsupervised classification. Uses tslearn
# https://tslearn.readthedocs.io/en/stable/index.html

# Be careful with v0_unsupervised and all supervised versions,
# because there dataTrain is not stacked before create_sequences,
# so the sets are not aligned in time.

# Same as v1, but with different data

import pandas as pd
import matplotlib.pyplot as plt
import datetime
import numpy as np
import os.path
from optparse import OptionParser
import copy
import pickle
from tslearn.clustering import TimeSeriesKMeans
from tslearn.neighbors import KNeighborsTimeSeries
from collections import Counter

parser = OptionParser()
parser.add_option("-t", "--train", dest="train", help="Train the models (default: False)", default=False, action="store_true")
parser.add_option("-n", "--timesteps", dest="timesteps", help="Time steps (window length)", default=12)

(options, args) = parser.parse_args()
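
# Illustrative invocation (an assumption, not part of the original file): run from the
# repository root so that the './data/*.csv' files referenced below can be found.
#   python v2_unsupervised.py --train -n 12    # fit the clustering model and pickle it
#   python v2_unsupervised.py -n 12            # load the previously pickled model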


# data files arrays. Index:
# 0. No failure
# 1. Blocked evaporator
# 2. Fully blocked condenser
# 3. Condenser fan not working
# 4. Open door


NumberOfFailures=4
datafiles={}
listofFacilitySetpoint=['5-26','5-18','5-22','30','32','34']

for j in listofFacilitySetpoint:
    datafiles[j]=[]
    for i in range(NumberOfFailures+1):
        datafiles[j].append([])

# Freezer, SP=-26
datafiles['5-26'][0]=['2025-01-25_5_','2025-01-26_5_','2025-01-29_5_','2025-01-30_5_','2025-01-31_5_','2025-02-01_5_','2025-02-02_5_']
datafiles['5-26'][1]=['2024-12-11_5_', '2024-12-12_5_','2024-12-13_5_','2024-12-14_5_','2024-12-15_5_']
datafiles['5-26'][2]=['2024-12-18_5_','2024-12-19_5_']
datafiles['5-26'][3]=['2024-12-28_5_','2024-12-29_5_','2024-12-30_5_','2024-12-31_5_','2025-01-01_5_']
datafiles['5-26'][4]=['2025-02-13_5_','2025-02-14_5_','2025-02-15_5_','2025-02-16_5_','2025-02-17_5_','2025-02-18_5_','2025-02-19_5_']

# Freezer, SP=-18
datafiles['5-18'][0]=['2025-01-21_5_','2025-01-22_5_','2025-01-23_5_',] # these files are missing

# Freezer, SP=-22
datafiles['5-22'][0]=['2025-03-13_5_','2025-03-14_5_','2025-03-15_5_','2025-03-16_5_']
datafiles['5-22'][1]=['2025-03-23_5_','2025-03-24_5_','2025-03-25_5_']  # overlaps with the next test
datafiles['5-22'][2]=[]

# Refrigerator 0
datafiles['30'][0]=['2025-01-21_3_','2025-01-22_3_','2025-01-23_3_','2025-01-24_3_','2025-01-25_3_','2025-01-26_3_']
datafiles['30'][1]=['2024-12-11_3_','2024-12-12_3_','2024-12-13_3_','2024-12-14_3_','2024-12-15_3_']
datafiles['30'][2]=['2024-12-18_3_','2024-12-19_3_','2024-12-20_3_']
datafiles['30'][3]=['2024-12-28_3_','2024-12-29_3_','2024-12-30_3_','2024-12-31_3_']
datafiles['30'][4]=['2025-02-12_3_','2025-02-13_3_','2025-02-14_3_','2025-02-15_3_','2025-02-16_3_','2025-02-17_3_','2025-02-18_3_','2025-02-19_3_']   # overlaps with the fan-not-working test, and days 20 and 21 are missing


# Refrigerator 2
datafiles['32'][0]=['2025-03-13_3_','2025-03-14_3_','2025-03-15_3_','2025-03-16_3_']
datafiles['32'][1]=['2025-03-10_3_']
datafiles['32'][2]=['2025-03-17_3_']
datafiles['32'][3]=['2025-03-22_3_','2025-03-23_3_']
datafiles['32'][4]=['2025-03-27_3_','2025-03-28_3_']


# Refrigerator 4
datafiles['34'][0]=['2025-03-31_3_','2025-04-01_3_','2025-04-02_3_','2025-04-03_3_']
datafiles['34'][1]=['2025-04-25_3_','2025-04-26_3_','2025-04-27_3_','2025-04-28_3_'] # this data is missing
datafiles['34'][2]=['2025-04-11_3_','2025-04-12_3_','2025-04-13_3_','2025-04-14_3_']
datafiles['34'][3]=['2025-04-30_3_','2025-05-01_3_','2025-05-02_3_','2025-05-03_3_','2025-05-04_3_','2025-05-05_3_']
datafiles['34'][4]=['2025-04-23_3_','2025-04-24_3_','2025-04-25_3_']
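
# Note (added for clarity; the path pattern is taken from the loading loop below):
# each entry above is a file-name prefix, read later as './data/<prefix>.csv',
# e.g. './data/2025-03-13_3_.csv' for facilitySetpoint '32', no-failure case.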



# data files arrays. Index:
# 0. No failure
# 1. Blocked evaporator
# 2. Fully blocked condenser
# 3. Condenser fan not working
# 4. Open door


facilitySetpoint='32'
# Features suggested by Xavier
# Careful with 'tc s3' because in datafiles[0] it is always null
# It seems to be incorporated in the new tests

# r1 s5: supply air flow temperature
# r1 s1: inlet evaporator temperature
# r1 s4: condenser outlet temperature

# Variables 'r1 s4' and 'pa1 apiii' may not exist in cloud controllers


features=['r1 s1','r1 s4','r1 s5','pa1 apiii']
features=['r1 s1','r1 s4','r1 s5']
featureNames={}
featureNames['r1 s1']='$T_{evap}$'
featureNames['r1 s4']='$T_{cond}$'
featureNames['r1 s5']='$T_{air}$'
featureNames['pa1 apiii']='$P_{elec}$'

unitNames={}
unitNames['r1 s1']='$(^{o}C)$'
unitNames['r1 s4']='$(^{o}C)$'
unitNames['r1 s5']='$(^{o}C)$'
unitNames['pa1 apiii']='$(W)$'


#features=['r1 s1','r1 s2','r1 s3','r1 s4','r1 s5','r1 s6','r1 s7','r1 s8','r1 s9','r1 s10','r2 s1','r2 s2','r2 s3','r2 s4','r2 s5','r2 s6','r2 s7','r2 s8','r2 s9','pa1 apiii','tc s1','tc s2']

#features=['r2 s2', 'tc s1','r1 s10','r1 s6','r2 s8']

NumFeatures=len(features)

df_list=[]
for i in range(NumberOfFailures+1):
    df_list.append([])

for i in range(NumberOfFailures+1):
    dftemp=[]
    for f in datafiles[facilitySetpoint][i]:
        print("                 ", f)
        #df1 = pd.read_csv('./data/'+f+'.csv', parse_dates=['datetime'], dayfirst=True, index_col='datetime')
        df1 = pd.read_csv('./data/'+f+'.csv')
        dftemp.append(df1)
    df_list[i]=pd.concat(dftemp)


# Subsample to 5-minute periods = 30 * 10-second samples.
# We take samples every 5 minutes because, in production, data will only be available at this frequency.
subsamplingrate=30

dataframe=[]
for i in range(NumberOfFailures+1):
    dataframe.append([])

for i in range(NumberOfFailures+1):
    datalength=df_list[i].shape[0]
    dataframe[i]=df_list[i].iloc[range(0,datalength,subsamplingrate)][features]
    dataframe[i].reset_index(inplace=True,drop=True)
    dataframe[i].dropna(inplace=True)

# Train data: first 2/3 of each class' data
# Test data: the following NumberOfSamplesForTest samples (or the whole last 1/3 when NumberOfSamplesForTest is 0)
dataTrain=[]
dataTest=[]
NumberOfSamplesForTest=300

for i in range(NumberOfFailures+1):
    dataTrain.append(dataframe[i].values[0:int(dataframe[i].shape[0]*2/3),:])
    if NumberOfSamplesForTest==0:  # Take all
        dataTest.append(dataframe[i].values[int(dataframe[i].shape[0]*2/3):,:])
    else:
        dataTest.append(dataframe[i].values[int(dataframe[i].shape[0]*2/3):int(dataframe[i].shape[0]*2/3)+NumberOfSamplesForTest,:])

# Calculate means and stdevs over the stacked training data
a=dataTrain[0]
for i in range(1,NumberOfFailures+1):
    a=np.vstack((a,dataTrain[i]))

means=a.mean(axis=0)
stdevs=a.std(axis=0)
def normalize2(train,test):
    return( (train-means)/stdevs, (test-means)/stdevs )

dataTrainNorm=[]
dataTestNorm=[]
for i in range(NumberOfFailures+1):
    dataTrainNorm.append([])
    dataTestNorm.append([])

for i in range(NumberOfFailures+1):
    (dataTrainNorm[i],dataTestNorm[i])=normalize2(dataTrain[i],dataTest[i])
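
# Illustrative helper (an added sketch, not used elsewhere in this file): the inverse of the
# z-score normalization above. The plotting code below applies the same operation inline
# as `*stdevs[...]+means[...]` to recover original units.
def denormalize(values):
    # values was normalized as (x - means) / stdevs; broadcasting over the last (feature) axis
    return values*stdevs+means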

def plotData():
    fig, axes = plt.subplots(
        nrows=NumberOfFailures+1, ncols=2, figsize=(15, 20), dpi=80, facecolor="w", edgecolor="k",sharex=True
    )
    for i in range(NumberOfFailures+1):
        axes[i][0].plot(np.concatenate((dataTrainNorm[i][:,0],dataTestNorm[i][:,0])),label="Fail "+str(i)+",  feature 0")
        axes[i][1].plot(np.concatenate((dataTrainNorm[i][:,1],dataTestNorm[i][:,1])),label="Fail "+str(i)+",  feature 1")
    #axes[1].legend()
    #axes[0].set_ylabel(features[0])
    #axes[1].set_ylabel(features[1])
    plt.show()

#plotData()
#exit()

def create_sequences(values, time_steps):
    output = []
    for i in range(len(values) - time_steps):
        output.append(values[i : (i + time_steps)])
    return np.stack(output)
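
# Shape note (derived from the loop above): for an input of shape (N, NumFeatures) and a
# window of time_steps samples, create_sequences returns an array of shape
# (N - time_steps, time_steps, NumFeatures); window k starts at sample k.
# For example, N=1000 samples, 3 features and time_steps=12 give shape (988, 12, 3).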

def listToString(l):
    r=''
    for i in l:
        r+=str(i)
    return(r.replace(' ',''))

timesteps=int(options.timesteps)

X=dataTrainNorm[0]
for i in range(1,NumberOfFailures+1):
    X=np.vstack((X,dataTrainNorm[i]))

xtrain=create_sequences(X,timesteps)

km = TimeSeriesKMeans(n_clusters=NumberOfFailures+1, metric="dtw", random_state=0)
modelpath="model_v2_unsupervised_"+facilitySetpoint+"_"+str(timesteps)+listToString(features)+".pk"
if options.train:
    km.fit(xtrain)
    km.to_pickle(modelpath)
else:
    km = TimeSeriesKMeans.from_pickle(modelpath)  # from_pickle is a class method; keep the loaded model
    #km.fit_predict(xtrain)

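# Quick sanity check (illustrative sketch; assumes the model above has been fitted or loaded):
# count how many training windows fall into each of the NumberOfFailures+1 clusters.
#   print(Counter(km.predict(xtrain)))
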
colorline=['violet','lightcoral','cyan','lime','grey']
colordot=['darkviolet','red','blue','green','black']


featuresToPlot=features
indexesToPlot=[]
for i in featuresToPlot:
    indexesToPlot.append(features.index(i))


def plot(data,ranges):
    km.fit_predict(data)
    # Expand the data to plot with the timesteps samples of the last window
    datatoplot=data[:,0,:]
    datatoplot=np.vstack((datatoplot,data[ranges[len(ranges)-1][1],:,:]))

    labels=[]    # Labels are assigned arbitrarily by the classifier
    for i in range(len(ranges)):
        b=Counter(km.labels_[ranges[i][0]:ranges[i][1]])
        labels.append(b.most_common(1)[0][0])

    print("\n\n\nLABELS: ",labels,"\n\n")

    NumFeaturesToPlot=len(indexesToPlot)
    plt.rcParams.update({'font.size': 16})
    fig, axes = plt.subplots(
        nrows=NumFeaturesToPlot, ncols=1, figsize=(15, 10), dpi=80, facecolor="w", edgecolor="k",sharex=True
    )
    for i in range(NumFeaturesToPlot):
        init=0
        end=ranges[0][1]
        labelsplotted=[]
        for j in range(len(ranges)):
            if j==(len(ranges)-1): # Plot the last timesteps
                classtype=labels.index(labels[j])
                if classtype in labelsplotted:
                    axes[i].plot(range(init,end+timesteps),datatoplot[ranges[j][0]:ranges[j][1]+timesteps,indexesToPlot[i]]*stdevs[i]+means[i], color=colorline[classtype],linewidth=1)
                else:
                    axes[i].plot(range(init,end+timesteps),datatoplot[ranges[j][0]:ranges[j][1]+timesteps,indexesToPlot[i]]*stdevs[i]+means[i], label="Class: "+str(classtype), color=colorline[classtype],linewidth=1)
                    labelsplotted.append(classtype)
            else:
                classtype=labels.index(labels[j])
                if classtype in labelsplotted:
                    axes[i].plot(range(init,end),datatoplot[ranges[j][0]:ranges[j][1],indexesToPlot[i]]*stdevs[i]+means[i], color=colorline[classtype],linewidth=1)
                else:
                    axes[i].plot(range(init,end),datatoplot[ranges[j][0]:ranges[j][1],indexesToPlot[i]]*stdevs[i]+means[i], label="Class: "+str(classtype), color=colorline[classtype],linewidth=1)
                    labelsplotted.append(classtype)
            init=end
            if j<(len(ranges)-1):
                end+=(ranges[j+1][1]-ranges[j+1][0])
        x=[]
        y=[]
        for j in range(len(ranges)):
            x.append([])
            y.append([])

        for j in range(len(ranges)):
            for k in range(ranges[j][0],ranges[j][1]):
                try:  # I don't know why the index sometimes fails !!!!
                    x[labels.index(km.labels_[k])].append(k+timesteps)
                    y[labels.index(km.labels_[k])].append(datatoplot[k+timesteps,indexesToPlot[i]]*stdevs[i]+means[i])
                except:
                    x[0].append(k+timesteps)
                    y[0].append(datatoplot[k+timesteps,indexesToPlot[i]]*stdevs[i]+means[i])

        labelsplotted=[]
        for j in range(len(ranges)):
            classtype=labels.index(labels[j])
            if classtype in labelsplotted:
                axes[i].plot(x[j],y[j] ,color=colordot[labels.index(labels[j])],marker='.',linewidth=0)
            else:
                axes[i].plot(x[j],y[j] ,color=colordot[labels.index(labels[j])],marker='.',linewidth=0,label="Class type "+str(j) )
                labelsplotted.append(classtype)

        if i==(NumFeatures-1):
            axes[i].legend(loc='right')
        s=''
        s+=featureNames[features[indexesToPlot[i]]]
        s+=' '+unitNames[features[indexesToPlot[i]]]
        axes[i].set_ylabel(s)
        axes[i].grid()

    axes[NumFeaturesToPlot-1].set_xlabel("Sample number")
    plt.show()


'''
Ranges=[]
r=0
for i in range(NumberOfFailures+1):
    Ranges.append([r,r+dataTrainNorm[i].shape[0]])
    r+=dataTrainNorm[i].shape[0]
# Drop the last TIME_STEPS for plotting
Ranges[NumberOfFailures][1]=Ranges[NumberOfFailures][1]-timesteps-1

plot(xtrain,Ranges)
'''
# Try with test data
X=dataTestNorm[0]
Ranges=[[0,dataTestNorm[0].shape[0]]]
r=dataTestNorm[0].shape[0]
for i in range(1,NumberOfFailures+1):
    X=np.vstack((X,dataTestNorm[i]))
    Ranges.append([r,r+dataTestNorm[i].shape[0]])
    r+=dataTestNorm[i].shape[0]

X=np.vstack((X,dataTestNorm[0]))  # We add a last segment of no-failure data
Ranges.append([r,r+dataTestNorm[0].shape[0]])
Ranges[len(Ranges)-1][1]=Ranges[len(Ranges)-1][1]-timesteps-1

xtest=create_sequences(X,timesteps)
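
# Alignment note (derived from create_sequences): window k of xtest starts at sample k of X,
# so the sample-index Ranges above can also be used to index the per-window cluster labels;
# the last range was shortened by timesteps+1 so it stays within the len(X)-timesteps windows.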




def anomalyMetric(labels,ranges):
    # Takes ONLY the first segments of Ranges
    # FP, TP: false/true positives
    # TN, FN: true/false negatives
    # Sensitivity (recall): probability of detecting a failure when the data is a failure: TP/(TP+FN)
    # Precision: rate of true positives among positive results: TP/(TP+FP)
    # F1-score: predictive performance measure: 2*Precision*Sensitivity/(Precision+Sensitivity)

    lab=[]    # Labels are assigned arbitrarily by the classifier
    TP=[]
    FN=[]
    TPFP=[]
    for i in range(NumberOfFailures+1):
        TP.append([])
        FN.append([])
        TPFP.append([])
        b=Counter(labels[ranges[i][0]:ranges[i][1]])
        lab.append(b.most_common(1)[0][0])

    for i in range(NumberOfFailures+1):
        counttp=0
        countfn=0
        for j in range(ranges[i][0],ranges[i][1]-timesteps):
            if lab.index(labels[j])==i:
                counttp+=1
            else:
                countfn+=1
        TP[i]=counttp
        FN[i]=countfn

    for i in range(NumberOfFailures+1):
        count=0
        for ii in range(NumberOfFailures+1):
            for j in range(ranges[ii][0],ranges[ii][1]-timesteps):
                if lab.index(labels[j])==i:
                    count+=1
            TPFP[i]=count

    segmentLength=[]
    for i in range(NumberOfFailures+1):
        segmentLength.append(ranges[i][1]-timesteps-ranges[i][0])
    totalSegmentLength=0
    for i in range(NumberOfFailures+1):
        totalSegmentLength+=segmentLength[i]


    Sensitivity=0
    Precision=0
    for i in range(NumberOfFailures+1):
        Sensitivity+=TP[i]/(TP[i]+FN[i])*segmentLength[i]/totalSegmentLength
        Precision+=TP[i]/(TPFP[i])*segmentLength[i]/totalSegmentLength
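
    # The per-class scores are combined as a weighted average (this restates the loop above):
    #   Sensitivity = sum_i [ TP_i/(TP_i+FN_i) * segmentLength_i/totalSegmentLength ]
    #   Precision   = sum_i [ TP_i/TPFP_i      * segmentLength_i/totalSegmentLength ]
    # where TPFP_i counts every window assigned to class i, across all segments.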

    print(lab)
    print("TP: ",TP)
    print("FN: ",FN)
    print("TPFP: ",TPFP)
    print("Sensitivity: ",Sensitivity)
    print("Precision: ",Precision)
    print("F1-Score: ",2*Precision*Sensitivity/(Sensitivity+Precision))


km.fit_predict(xtest)
#anomalyMetric(km.labels_,Ranges)
plot(xtest,Ranges)
