@@ -1099,7 +1099,7 @@ def cpt_merge_x_files(file_paths):
 merged_out_path = re.sub("[a-zA-Z]+.tsv", "merged.tsv", file_path_1)
- tmp_file = re.sub("[a-zA-Z]+.tsv", "batch.bat", file_path_1)
+ tmp_file = re.sub("[a-zA-Z]+.tsv", "batch.bat", file_path_1)

 # arguments for the batch file
 if platform.system() == "Windows":
@@ -1142,7 +1142,7 @@ def cpt_merge_x_files(file_paths):
 # check that the temporary file was created
- if not os.path.exists(tmp_file):
+ if not os.path.exists(merged_out_path):
     status = "Failed: Error en la creación del archivo temporal"
 else:
     # copy the file to merged_out_path
@@ -1158,6 +1158,7 @@ def cpt_merge_x_files(file_paths):
status = "Failed: Error al ejecutar CPT_batch for merging"
+
return status
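For context on the corrected check above: cpt_merge_x_files appears to write a batch script, run CPT_batch to perform the merge, and should then confirm that the merged file, not the batch script, was actually produced. A minimal sketch of that run-then-verify pattern, with a placeholder command and hypothetical helper name rather than the project's real CPT_batch invocation:

import os
import subprocess

def run_and_verify(cmd, expected_output):
    """Run an external command and confirm it produced the expected file."""
    # 'cmd' stands in for executing the generated .bat / CPT_batch call; it is a placeholder here.
    result = subprocess.run(cmd, shell=True)
    if result.returncode != 0:
        return "Failed: Error al ejecutar CPT_batch for merging"
    if not os.path.exists(expected_output):
        # Same idea as the corrected check: verify the merged output file, not the batch script.
        return "Failed: Error en la creación del archivo temporal"
    return "ok"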
@@ -1174,7 +1175,7 @@ def cpt_merge_x_files(file_paths):
 #########################################################
 print(os.path.join("D:/", "andres"))
 # define some global variables (some paths should already be defined in runMain so may not be necessary here)
- root_dir = os.path.join("D:" + os.sep, "documents_andres", "pr_r", "Colombia", "inputs")
+ root_dir = os.path.join("D:" + os.sep, "documents_andres", "pr_descarga", "Colombia", "inputs")
 main_dir = os.path.join(root_dir, "prediccionClimatica")
 path_dpto = os.path.join(main_dir, 'estacionesMensuales')  # dir_response
 dir_save = os.path.join(main_dir, "descarga")  # paste0(dirPrediccionInputs, "descarga", sep = "", collapse = NULL)
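The directories defined here are searched below with recursive globs of the form f"{path_dpto}\\**\\stations.csv". A small sketch, assuming the same folder layout, of how the same lookup can be written with os.path.join so the pattern does not hard-code the Windows separator (find_department_inputs is a hypothetical helper name):

import glob
import os

def find_department_inputs(path_dpto):
    """Recursively locate stations.csv and cpt_areas.json under each department folder."""
    stations = glob.glob(os.path.join(path_dpto, "**", "stations.csv"), recursive=True)
    areas = glob.glob(os.path.join(path_dpto, "**", "cpt_areas.json"), recursive=True)
    return stations, areas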
@@ -1203,22 +1204,38 @@ def cpt_merge_x_files(file_paths):
 dir_names = os.listdir(path_dpto)
+ path_stations = glob.glob(f"{path_dpto}\\**\\stations.csv", recursive=True)
 path_json = glob.glob(f"{path_dpto}\\**\\cpt_areas.json", recursive=True)
 init_params = {k: load_json(pth) for k, pth in zip(dir_names, path_json)}

+ # drop from the analysis any departments whose cpt_areas.json has no areas
+ items_lgth = {k: sum([len(x["areas"]) for x in val]) for k, val in init_params.items()}
+ empty_dpt = [k for k, v in items_lgth.items() if v == 0]
+ lgth_list = [v for k, v in items_lgth.items()]
+ if len(empty_dpt) > 0:
+     for idx in range(len(items_lgth)):
+         if lgth_list[idx] == 0:
+             dir_names.pop(idx)
+             path_json.pop(idx)
+             path_stations.pop(idx)
+     for nm in empty_dpt:
+         init_params.pop(nm)
+
+
+
+
 month = int(date.today().strftime("%m"))
 year = int(date.today().strftime("%Y"))
 season = {k: [x["season"] for x in val if len(x['areas']) != 0] for k, val in init_params.items()}
 month_season = {k: [get_season_months(x["season"], month_abb=month_abb) for x in val if len(x['areas']) != 0] for k, val in init_params.items()}
 predictands = {k: [x["predictand"] for x in val if len(x['areas']) != 0] for k, val in init_params.items()}
 predictors = {k: [len(np.unique(pd.DataFrame(x["areas"])["predictor"].to_numpy().tolist())) for x in val if len(x['areas']) != 0] for k, val in init_params.items()}

+ #list_elements = [len(x) for x in predictands.values()]

 start_date = date.today() + timedelta(days=30)
 years = {k: get_season_years(season_type=value[0]["type"], month=month, year=year) for k, value in init_params.items()}
- {k: value[0]['type'] for k, value in init_params.items()}
-

 path_months_l = {x: os.path.join(main_dir, "run_CPT", x) for x in dir_names}
 for ky, pth in path_months_l.items():
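The empty-department filter added above removes entries from dir_names, path_json and path_stations by index inside the loop; if more than one department is empty, the later indices no longer match lgth_list after the first pop. A hedged sketch of an alignment-safe equivalent that rebuilds the collections instead of mutating them in place (drop_empty_departments is a hypothetical helper; the variable names follow the diff):

def drop_empty_departments(init_params, dir_names, path_json, path_stations):
    """Return filtered copies with departments whose cpt_areas.json defines no areas removed."""
    items_lgth = {k: sum(len(x["areas"]) for x in val) for k, val in init_params.items()}
    keep = {k for k, total in items_lgth.items() if total > 0}
    # Filter the parallel lists in lockstep so they stay aligned with init_params.
    filtered_json = [p for d, p in zip(dir_names, path_json) if d in keep]
    filtered_stations = [p for d, p in zip(dir_names, path_stations) if d in keep]
    filtered_names = [d for d in dir_names if d in keep]
    filtered_params = {k: v for k, v in init_params.items() if k in keep}
    return filtered_params, filtered_names, filtered_json, filtered_stations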
@@ -1275,7 +1292,7 @@ def cpt_merge_x_files(file_paths):
 print("\nArchivos de entrada cargados")

- path_stations = glob.glob(f"{path_dpto}\\**\\stations.csv", recursive=True)
+
 data_y = {k: pd.read_csv(fl) for k, fl in zip(dir_names, path_stations)}
 part_id = {k: files_y(df, k, main_dir=main_dir) for k, df in data_y.items()}