# simulation_analysis.whatif_scenarios
# This module contains functions to model disruptions in a port simulation environment.
"""
This module contains functions to model disruptions in a port simulation environment.
"""
import random
from pathlib import Path

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

from simulation_classes.port import Crane, Pipeline, Conveyor
from simulation_handler.helpers import get_value_by_terminal
from constants import SEVERE_HURRICANE, TROPICAL_STORM, HURRICANE_START, WARMUP_ITERS, NUM_MONTHS, NUM_RUNS, LOG_NUM, INBOUND_CLOSED, OUTBOUND_CLOSED


def reduce_cranes(env, change, terminals, time_start, time_end, port_berths_container_terminals, terminal_data_cache, berths_aff_per):
    """
    Temporarily removes up to ``abs(change)`` cranes from each affected berth's
    crane store between `time_start` and `time_end`, then restores them.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of cranes to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices (0-based) where the cranes will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_berths_container_terminals (dict): Dictionary containing berth information for container terminals.
        terminal_data_cache (dict): Cache containing terminal data for crane transfer rates.
        berths_aff_per (float): Percentage of berths to be affected by the change.

    Returns:
        None
    """
    # Snapshot the berths currently in each terminal's store so the same
    # berths are targeted for both removal and restoration.
    port_berth_initially = {}
    for terminal_idx in terminals:
        port_berth_initially[terminal_idx] = list(
            port_berths_container_terminals[terminal_idx].items)

    print(
        f"Modelling disruption: Attempting to remove {-change} cranes from each berth in terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # berth -> cranes taken out, so restoration is exact.
    removed = {}
    for terminal_idx in terminals:
        num_berths = len(port_berth_initially[terminal_idx])
        berths_aff = int(berths_aff_per * num_berths)
        count = 0
        for berth in port_berth_initially[terminal_idx]:
            count += 1  # NOTE: the first berth is always processed, even when berths_aff == 0
            if change < 0:
                num_to_remove = min(len(berth.cranes.items), abs(change))
                taken = []
                for _ in range(num_to_remove):
                    if berth.cranes.items:
                        crane = yield berth.cranes.get()
                        taken.append(crane)
                removed[berth] = taken
            if count >= berths_aff:
                break

    yield env.timeout(time_end - time_start)

    # Restore exactly the cranes that were removed. (Rebuilding abs(change)
    # new cranes per berth, as before, could inflate the crane count when
    # fewer than abs(change) cranes were actually available for removal.)
    for berth, cranes in removed.items():
        for crane in cranes:
            yield berth.cranes.put(crane)


def reduce_pipelines(env, change, terminals, time_start, time_end, port_berth_liquid_terminals, terminal_data_cache, berths_aff_per):
    """
    Temporarily removes up to ``abs(change)`` pipelines from each affected
    berth's pipeline store between `time_start` and `time_end`, then restores them.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of pipelines to be removed (negative value) or added (positive value).
        terminals (list): List of terminal ids (1-based) where the pipelines will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_berth_liquid_terminals (dict): Dictionary containing berth information for liquid terminals.
        terminal_data_cache (dict): Cache containing terminal data for pipeline transfer rates.
        berths_aff_per (float): Percentage of berths to be affected by the change.

    Returns:
        None
    """
    # Liquid terminals are addressed with 1-based ids; the store dict is 0-based.
    port_berth_initially = {}
    for terminal_id in terminals:
        terminal_idx = terminal_id - 1
        port_berth_initially[terminal_idx] = list(
            port_berth_liquid_terminals[terminal_idx].items)

    print(
        f"Modelling disruption: Attempting to remove {-change} pipelines from each berth in terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # berth -> pipelines taken out, so restoration is exact.
    removed = {}
    for terminal_id in terminals:
        terminal_idx = terminal_id - 1
        num_berths = len(port_berth_initially[terminal_idx])
        berths_aff = int(berths_aff_per * num_berths)
        count = 0
        for berth in port_berth_initially[terminal_idx]:
            # Count affected *berths* (not removed pipelines) so that
            # berths_aff_per has the same meaning as in reduce_cranes /
            # reduce_conveyors; the original counted individual pipelines.
            count += 1
            if change < 0:
                num_to_remove = min(len(berth.pipelines.items), abs(change))
                taken = []
                for _ in range(num_to_remove):
                    if berth.pipelines.items:
                        pipeline = yield berth.pipelines.get()
                        taken.append(pipeline)
                removed[berth] = taken
            if count >= berths_aff:
                break

    yield env.timeout(time_end - time_start)

    # Restore exactly the pipelines that were removed. (This also avoids the
    # original off-by-one, which looked up transfer rates with terminal_ids+1
    # even though terminal ids are already 1-based here.)
    for berth, pipelines in removed.items():
        for pipeline in pipelines:
            yield berth.pipelines.put(pipeline)


def reduce_conveyors(env, change, terminals, time_start, time_end, port_berth_drybulk_terminals, terminal_data_cache, berths_aff_per):
    """
    Temporarily removes up to ``abs(change)`` conveyors from each affected
    berth's conveyor store between `time_start` and `time_end`, then restores them.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of conveyors to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices (0-based) where the conveyors will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_berth_drybulk_terminals (dict): Dictionary containing berth information for dry bulk terminals.
        terminal_data_cache (dict): Cache containing terminal data for conveyor transfer rates.
        berths_aff_per (float): Percentage of berths to be affected by the change.

    Returns:
        None
    """
    # Snapshot the berths currently in each terminal's store so the same
    # berths are targeted for both removal and restoration.
    port_berth_initially = {}
    for terminal_idx in terminals:
        port_berth_initially[terminal_idx] = list(
            port_berth_drybulk_terminals[terminal_idx].items)

    print(
        f"Modelling disruption: Attempting to remove {-change} conveyor from each berth in terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # berth -> conveyors taken out, so restoration is exact.
    removed = {}
    for terminal_idx in terminals:
        num_berths = len(port_berth_initially[terminal_idx])
        berths_aff = int(berths_aff_per * num_berths)
        count = 0
        for berth in port_berth_initially[terminal_idx]:
            count += 1  # NOTE: the first berth is always processed, even when berths_aff == 0
            if change < 0:
                num_to_remove = min(len(berth.conveyors.items), abs(change))
                taken = []
                for _ in range(num_to_remove):
                    if berth.conveyors.items:
                        conveyor = yield berth.conveyors.get()
                        taken.append(conveyor)
                removed[berth] = taken
            if count >= berths_aff:
                break

    yield env.timeout(time_end - time_start)

    # Restore exactly the conveyors that were removed (see reduce_cranes).
    for berth, conveyors in removed.items():
        for conveyor in conveyors:
            yield berth.conveyors.put(conveyor)


def reduce_berths(env, change, terminals, time_start, time_end, port_berths_terminal):
    """
    Temporarily removes ``abs(change)`` berths from each listed terminal's
    berth store between `time_start` and `time_end`, then returns the very
    same berth objects to the store.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of berths to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the berths will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_berths_terminal (dict): Dictionary containing berth information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} berths from terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    removed_berths = {terminal: [] for terminal in terminals}

    # Reduce capacity by taking items out of the store. Each get() blocks
    # until a berth is free, so occupied berths are removed as they release.
    for terminal in terminals:
        for _ in range(abs(change)):
            berth = yield port_berths_terminal[terminal].get()
            removed_berths[terminal].append(berth)
        print(
            f"Terminal {terminal}: Berth capacity effectively reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Restore the removed berths to the store.
    for terminal in terminals:
        for berth in removed_berths[terminal]:
            yield port_berths_terminal[terminal].put(berth)
        print(
            f"Terminal {terminal}: Berth Capacity restored by {abs(change)} at time {env.now}")


def reduce_yard(env, change, terminals, time_start, time_end, port_yard_container_terminals):
    """
    Temporarily reduces the yard capacity in each terminal by ``abs(change)``
    units between `time_start` and `time_end` by filling yard space with dummy
    content, then frees the same amount afterwards.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of yard spaces to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the yard capacity will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_yard_container_terminals (dict): Dictionary containing yard information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} units yard space from terminals {terminals} from time {time_start} to {time_end} (Adding dummy containers)")

    yield env.timeout(time_start)

    # NOTE(review): this assumes the yard behaves like simpy.Container
    # (put/get take amounts), matching reduce_tank_capacity/_silo_capacity.
    # The original code called put() with no argument and get() on a plain
    # Python list, both of which raise at runtime — confirm against the
    # yard resource's actual type.
    for terminal in terminals:
        yield port_yard_container_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Yard capacity effectively reduced by {abs(change)} ctrs at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Free the dummy-occupied yard space again.
    for terminal in terminals:
        yield port_yard_container_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Yard capacity restored by {abs(change)} at time {env.now}")


def reduce_tank_capacity(env, change, terminals, time_start, time_end, port_tanks_liquid_terminals):
    """
    Temporarily reduces the tank capacity in each terminal by ``abs(change)``
    units between `time_start` and `time_end` by adding dummy liquid, then
    draws the same amount back out.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of tank spaces to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the tank capacity will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_tanks_liquid_terminals (dict): Dictionary containing tank information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} units tank space from terminals {terminals} from time {time_start} to {time_end} (Adding dummy liquid)")

    yield env.timeout(time_start)

    # Reduce the usable space by filling it with dummy liquid. abs() is
    # required because `change` is negative for reductions and Container
    # amounts must be positive (the original passed `change` directly).
    for terminal in terminals:
        yield port_tanks_liquid_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Tank capacity effectively reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Drain the dummy liquid to restore the original capacity.
    for terminal in terminals:
        yield port_tanks_liquid_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Tank capacity effectively increased by {abs(change)} at time {env.now}")


def reduce_silo_capacity(env, change, terminals, time_start, time_end, port_silos_drybulk_terminals):
    """
    Temporarily reduces the silo capacity in each terminal by ``abs(change)``
    units between `time_start` and `time_end` by adding dummy dry bulk, then
    draws the same amount back out.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of silo spaces to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the silo capacity will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_silos_drybulk_terminals (dict): Dictionary containing silo information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} units silo space from terminals {terminals} from time {time_start} to {time_end} (Adding dummy dry bulk)")

    yield env.timeout(time_start)

    # Reduce the usable space by filling it with dummy dry bulk. abs() is
    # required because `change` is negative for reductions and Container
    # amounts must be positive (the original passed `change` directly).
    for terminal in terminals:
        yield port_silos_drybulk_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Silo capacity effectively reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Remove the dummy dry bulk to restore the original capacity.
    for terminal in terminals:
        yield port_silos_drybulk_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Silo capacity effectively increased by {abs(change)} at time {env.now}")


def reduce_bay_capacity(env, reduction, terminals, start_time, end_time, port_loading_bays_liquid_terminals):
    """
    Temporarily reduces the loading-bay capacity in each terminal by
    ``abs(reduction)`` between `start_time` and `end_time` by holding resource
    requests, then releases them to restore full capacity.

    Args:
        env (simpy.Environment): The simulation environment.
        reduction (int): The number of bays to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the bay capacity will be reduced.
        start_time (int): The time at which the reduction starts.
        end_time (int): The time at which the reduction ends.
        port_loading_bays_liquid_terminals (dict): Dictionary containing bay information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-reduction} bays from terminals {terminals} from time {start_time} to {end_time}")

    yield env.timeout(start_time)

    # Hold abs(reduction) requests per terminal to shrink effective capacity.
    # abs() is required because `reduction` is negative for removals — the
    # original range(reduction) was a silent no-op for negative values.
    requests = []
    for terminal in terminals:
        for _ in range(abs(reduction)):
            request = port_loading_bays_liquid_terminals[terminal].request()
            yield request  # Hold this request to reduce availability
            requests.append((terminal, request))
        print(
            f"{abs(reduction)} units of bays removed from terminal {terminal} at {start_time}")
    yield env.timeout(end_time - start_time)

    # Release the requests to restore full capacity.
    for terminal, request in requests:
        port_loading_bays_liquid_terminals[terminal].release(request)
        print(
            f"{abs(reduction)} units of bays restored from terminal {terminal} at {end_time}")


def reduce_chassis(env, change, terminals, time_start, time_end, port_chassis_container_terminals):
    """
    Temporarily reduces the chassis count in each terminal by ``abs(change)``
    between `time_start` and `time_end`, then restores it.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): The number of chassis to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the chassis capacity will be reduced.
        time_start (int): The time at which the reduction starts.
        time_end (int): The time at which the reduction ends.
        port_chassis_container_terminals (dict): Dictionary containing chassis information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} chassis from terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # Take chassis out of circulation. abs() is required because `change`
    # is negative for removals (the original passed `change` directly).
    for terminal in terminals:
        yield port_chassis_container_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Number of chassis reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Return the same number of chassis.
    for terminal in terminals:
        yield port_chassis_container_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Number of chassis increased by {abs(change)} at time {env.now}")


def reduce_truck_gates(env, reduction, terminals, start_time, end_time, truck_gates):
    """
    Temporarily reduces the truck-gate capacity in each terminal by
    ``abs(reduction)`` between `start_time` and `end_time` by holding resource
    requests, then releases them to restore full capacity.

    Args:
        env (simpy.Environment): The simulation environment.
        reduction (int): The number of truck gates to be removed (negative value) or added (positive value).
        terminals (list): List of terminal indices where the truck gate capacity will be reduced.
        start_time (int): The time at which the reduction starts.
        end_time (int): The time at which the reduction ends.
        truck_gates (dict): Dictionary containing truck gate information for the terminals.

    Returns:
        None
    """
    print(
        f"Modelling disruption: Attempting to remove {-reduction} truck gates from terminals {terminals} from time {start_time} to {end_time}")

    yield env.timeout(start_time)

    # Hold abs(reduction) gate requests per terminal (abs() because
    # `reduction` is negative for removals; range(reduction) was a no-op).
    requests = []
    for terminal in terminals:
        for _ in range(abs(reduction)):
            request = truck_gates[terminal].request()
            yield request  # Hold this request to reduce availability
            requests.append((terminal, request))
        print(
            f"{abs(reduction)} truck gates removed from terminal {terminal} at {start_time}")
    yield env.timeout(end_time - start_time)

    # Release the requests to restore full capacity.
    for terminal, request in requests:
        truck_gates[terminal].release(request)
        print(
            f"{abs(reduction)} truck gates restored from terminal {terminal} at {end_time}")


def _plot_arrival_histograms(ship_data_df, output_path):
    """
    Plot side-by-side histograms of the original ('old_arrival') and adjusted
    ('arrival') ship arrival times on shared axes and save the figure.

    Args:
        ship_data_df (pd.DataFrame): Frame containing 'old_arrival' and 'arrival' columns.
        output_path (str): Path (without mandatory extension) for plt.savefig.
    """
    fig, axes = plt.subplots(1, 2, figsize=(14, 6))
    axes[0].hist(ship_data_df['old_arrival'], bins=30, color='blue', alpha=0.7)
    axes[0].set_xlabel('Arrival Time')
    axes[0].set_ylabel('Frequency')
    axes[0].set_title('Old Arrival Times')
    axes[1].hist(ship_data_df['arrival'], bins=30, color='green', alpha=0.7)
    axes[1].set_xlabel('Arrival Time')
    axes[1].set_ylabel('Frequency')
    axes[1].set_title('New Arrival Times')

    # Share x/y limits so the before/after panels are visually comparable.
    min_x = min(ship_data_df['old_arrival'].min(), ship_data_df['arrival'].min())
    max_x = max(ship_data_df['old_arrival'].max(), ship_data_df['arrival'].max())
    max_y = max(axes[0].get_ylim()[1], axes[1].get_ylim()[1])
    for ax in axes:
        ax.set_xlim(min_x, max_x)
        ax.set_ylim(0, max_y)

    plt.tight_layout()
    plt.savefig(output_path)
    plt.close()


def adjust_arrival_rate(start_time, end_time, rate_parameter, run_id, plot_input=True):
    """
    Adjusts the arrival times of ships in the ship data CSV by dividing the
    arrivals that fall within [start_time, end_time] by `rate_parameter`, then
    saves the updated data back to the CSV file.

    Args:
        start_time (float): The start time of the range to adjust arrival times.
        end_time (float): The end time of the range to adjust arrival times.
        rate_parameter (float): The factor by which to adjust the arrival times (must be non-zero).
        run_id (str): The unique identifier for the simulation run.
        plot_input (bool): Whether to plot the input data before and after adjustment.

    Returns:
        tuple: (ship_data_df, ship_data) — the updated DataFrame and its dict form.
    """
    ship_data = pd.read_csv(f".{run_id}/logs/ship_data.csv")
    in_time_range = (ship_data['arrival'] >= start_time) & (
        ship_data['arrival'] <= end_time)
    ship_data['old_arrival'] = ship_data['arrival']
    # Dividing by rate_parameter > 1 compresses arrivals (higher rate).
    ship_data.loc[in_time_range, 'arrival'] /= rate_parameter

    # Drop the now-stale interarrival column (if present) and persist.
    if 'interarrival' in ship_data.columns:
        ship_data = ship_data.drop(columns=['interarrival'])
    ship_data_df = ship_data.sort_values('arrival')
    output_path = f'.{run_id}/logs/ship_data.csv'
    ship_data_df.to_csv(output_path, index=False)
    ship_data = ship_data_df.to_dict(orient="index")

    if plot_input:
        _plot_arrival_histograms(
            ship_data_df, f".{run_id}/plots/scenarios/factor_arrival_scnario_input")

    return ship_data_df, ship_data


def stop_and_bulk_arrival(start_time, end_time, run_id, plot_input=False):
    """
    Stops ship arrivals between `start_time` and `end_time` and reschedules the
    affected ships as a bulk arrival after `end_time`, spread evenly over a
    5-day recovery period. The updated data is written back to the CSV file.

    Args:
        start_time (float): The start time of the range to stop arrivals.
        end_time (float): The end time of the range to stop arrivals.
        run_id (str): The unique identifier for the simulation run.
        plot_input (bool): Whether to plot the input data before and after adjustment.

    Returns:
        tuple: (ship_data_df, ship_data_dict) — the updated DataFrame and its dict form.
    """
    ship_data = pd.read_csv(f'.{run_id}/logs/ship_data.csv')
    ship_data['old_arrival'] = ship_data['arrival']
    in_time_range = (ship_data['arrival'] >= start_time) & (
        ship_data['arrival'] <= end_time)

    num_ships = int(in_time_range.sum())
    if num_ships == 0:
        # No ships fall in the disruption window; avoid the division by zero
        # below and return the data unchanged (minus the stale column).
        if 'interarrival' in ship_data.columns:
            ship_data = ship_data.drop(columns=['interarrival'])
        return ship_data, ship_data.to_dict(orient="index")

    recovery_period = 24*5  # spread the backlog over 5 days
    gap = recovery_period/num_ships
    print(
        f"Modelling disruption: Stopping arrivals between {start_time} and {end_time} and bulk arrivals after {end_time} with a gap of {gap} hours")

    # Reschedule the held ships just after the disruption, equally spaced.
    bulk_arrival_time = end_time + 0.1
    ship_data.loc[in_time_range, 'arrival'] = [
        bulk_arrival_time + i * gap for i in range(num_ships)]
    if 'interarrival' in ship_data.columns:
        ship_data = ship_data.drop(columns=['interarrival'])
    ship_data_df = ship_data.sort_values('arrival')
    output_path = f'.{run_id}/logs/ship_data.csv'
    ship_data_df.to_csv(output_path, index=False)
    ship_data_dict = ship_data_df.to_dict(orient="index")

    if plot_input:
        _plot_arrival_histograms(
            ship_data_df, f".{run_id}/plots/scenarios/bulk_arrival_scenario_input")

    return ship_data_df, ship_data_dict


def filter_data_by_arrival_rate(data, time_arrival_dict, time_column):
    """
    Filter data based on arrival rate factors for different time periods.

    Parameters:
    -----------
    data : pd.DataFrame
        The dataframe to filter (truck_data or train_data)
    time_arrival_dict : dict
        Dictionary with (start_time, end_time): arrival_rate_factor
        e.g., {(6.0, 10.0): 0.8, (10.0, 15.0): 0.6}
        arrival_rate_factor of 0.8 means keep 80% of data (remove 20%)
    time_column : str
        Name of the time column to use for filtering
        ('arrival_time' for trucks, 'arrival at' for trains)

    Returns:
    --------
    pd.DataFrame
        Filtered dataframe with specified removal rates applied
    """
    # Work on a copy so the caller's frame is untouched.
    filtered_data = data.copy()
    keep_mask = pd.Series(True, index=filtered_data.index)

    for (start_time, end_time), arrival_rate_factor in time_arrival_dict.items():
        # Rows in this half-open time window [start_time, end_time).
        time_mask = (filtered_data[time_column] >= start_time) & (
            filtered_data[time_column] < end_time)
        rows_in_window = filtered_data[time_mask]

        if len(rows_in_window) == 0:
            continue

        # A factor of 0.8 keeps 80% of rows, i.e. removes 20%.
        removal_rate = 1 - arrival_rate_factor
        num_to_remove = int(len(rows_in_window) * removal_rate)

        if num_to_remove > 0:
            window_indices = rows_in_window.index.tolist()

            # Remove equispaced samples to preserve the temporal distribution
            # instead of dropping a contiguous run.
            removal_step = len(window_indices) / num_to_remove
            removal_indices = []
            for i in range(num_to_remove):
                idx_position = int(i * removal_step)
                if idx_position < len(window_indices):
                    removal_indices.append(window_indices[idx_position])

            keep_mask[removal_indices] = False

    filtered_data = filtered_data[keep_mask]
    return filtered_data.reset_index(drop=True)


def filter_truck_data(run_id, time_arrival_dict):
    """
    Filter truck data based on arrival rate factors and persist the result.

    Parameters:
    -----------
    run_id : str
        Unique identifier for the simulation run
    time_arrival_dict : dict
        Dictionary with (start_time, end_time): arrival_rate_factor

    Returns:
    --------
    pd.DataFrame
        Filtered truck dataframe
    """
    truck_data_path = f'.{run_id}/logs/truck_data.pkl'
    truck_data = pd.read_pickle(truck_data_path)
    filtered_truck_data = filter_data_by_arrival_rate(
        truck_data, time_arrival_dict, 'arrival')
    # Overwrite in place so downstream readers pick up the filtered data.
    filtered_truck_data.to_pickle(truck_data_path)
    return filtered_truck_data


def filter_train_data(run_id, time_arrival_dict):
    """
    Filter train data based on arrival rate factors and persist the result.

    Parameters:
    -----------
    run_id : str
        Unique identifier for the simulation run
    time_arrival_dict : dict
        Dictionary with (start_time, end_time): arrival_rate_factor

    Returns:
    --------
    pd.DataFrame
        Filtered train dataframe
    """
    train_data_path = f'.{run_id}/logs/train_data.csv'
    train_data = pd.read_csv(train_data_path)
    filtered_train_data = filter_data_by_arrival_rate(
        train_data, time_arrival_dict, 'arrival at')
    # Overwrite in place so downstream readers pick up the filtered data.
    filtered_train_data.to_csv(train_data_path, index=False)
    return filtered_train_data
703 returns: 704 None 705 """ 706 707 random.seed(seed) 708 709 port_berths_container_terminals, port_yard_container_terminals, port_berth_liquid_terminals, port_tanks_liquid_terminals, \ 710 port_berth_drybulk_terminals, port_silos_drybulk_terminals, port_loading_bays_liquid_terminals, port_drybulk_bays_drybulk_terminals, \ 711 port_chassis_container_terminals, truck_gates_ctr, truck_gates_liquid, truck_gates_dk, train_loading_racks_ctr, train_loading_racks_liquid, \ 712 train_loading_racks_dk, day_pilots, night_pilots, tugboats, channel_scheduler = terminal_resouces 713 714 num_container_terminals, num_liquid_terminals, num_drybulk_terminals = num_terminals_list 715 716 if SEVERE_HURRICANE == True and TROPICAL_STORM == False: 717 718 # stop all arrivals between 1500 and 1665 719 time_start = HURRICANE_START 720 disruption_duration = 24*6 721 722 # No arrivals for 6 days 723 stop_and_bulk_arrival(time_start, time_start + disruption_duration, run_id, plot_input=True) # 7 days better 724 725 # recovery periods 726 time_end_1_week = time_start + 24*7 # 7 days 727 time_end_2_week = time_start + 24*7 + 24*7 # 7 days 728 time_end_3_weeks = time_start + 24*7 + 24*14 # 14 days 729 time_end_4_weeks = time_start + 24*7 + 24*21 # 21 days 730 731 # For 1st week, 100% of berths have one crane down, 25% of liquid and dry bulk terminals have one pipeline and conveyor down in 50% of berths 732 env.process(reduce_cranes(env, -1, list(range(num_container_terminals)), 733 time_start, time_end_1_week, port_berths_container_terminals, terminal_data, 0.3)) 734 selected_liquid_terminals = random.sample( 735 range(num_liquid_terminals), int(0.1 * num_liquid_terminals)) 736 selected_drybulk_terminals = random.sample( 737 range(num_drybulk_terminals), int(0.5 * num_drybulk_terminals)) 738 env.process(reduce_pipelines(env, -1, selected_liquid_terminals, 739 time_start, time_end_1_week, port_berth_liquid_terminals, terminal_data, 0.3)) 740 env.process(reduce_conveyors(env, -1, 
selected_drybulk_terminals, 741 time_start, time_end_1_week, port_berth_drybulk_terminals, terminal_data, 0.3)) 742 743 # For 1st week, 100% of berths have one crane down, 25% of liquid and dry bulk terminals have one pipeline and conveyor down in 50% of berths 744 env.process(reduce_cranes(env, -1, list(range(num_container_terminals)), 745 time_end_1_week, time_end_2_week, port_berths_container_terminals, terminal_data, 0.30)) 746 env.process(reduce_pipelines(env, -1, selected_liquid_terminals, 747 time_end_1_week, time_end_2_week, port_berth_liquid_terminals, terminal_data, 0.30)) 748 env.process(reduce_conveyors(env, -1, selected_drybulk_terminals, 749 time_end_1_week, time_end_2_week, port_berth_drybulk_terminals, terminal_data, 0.30)) 750 751 # For 2nd week, 25% of berths have one crane down, 25% of (same) liquid and dry bulk terminals have one pipeline and conveyor down in 25% of berths 752 env.process(reduce_cranes(env, -1, list(range(num_container_terminals)), 753 time_end_2_week, time_end_3_weeks, port_berths_container_terminals, terminal_data, 0.2)) 754 env.process(reduce_pipelines(env, -1, selected_liquid_terminals, 755 time_end_2_week, time_end_3_weeks, port_berth_liquid_terminals, terminal_data, 0.2)) 756 env.process(reduce_conveyors(env, -1, selected_drybulk_terminals, 757 time_end_2_week, time_end_3_weeks, port_berth_drybulk_terminals, terminal_data, 0.2)) 758 759 # For 3rd week, 10% of berths have one crane down, 25% of (same) liquid and dry bulk terminals have one pipeline and conveyor down in 10% of berths 760 env.process(reduce_cranes(env, -1, list(range(num_container_terminals)), 761 time_end_3_weeks, time_end_4_weeks, port_berths_container_terminals, terminal_data, 0.1)) 762 env.process(reduce_pipelines(env, -1, selected_liquid_terminals, 763 time_end_3_weeks, time_end_4_weeks, port_berth_liquid_terminals, terminal_data, 0.1)) 764 env.process(reduce_conveyors(env, -1, selected_drybulk_terminals, 765 time_end_3_weeks, time_end_4_weeks, 
port_berth_drybulk_terminals, terminal_data, 0.1)) 766 767 # For 4th week, 10% of berths have one crane down, 25% of (same) liquid and dry bulk terminals have one pipeline and conveyor down in 10% of berths 768 # env.process(reduce_cranes(env, -1, list(range(num_container_terminals)), 769 # time_end_3_weeks, time_end_1_month, port_berths_container_terminals, terminal_data, 0.1)) 770 # env.process(reduce_pipelines(env, -1, selected_liquid_terminals, 771 # time_end_3_weeks, time_end_1_month, port_berth_liquid_terminals, terminal_data, 0.1)) 772 # env.process(reduce_conveyors(env, -1, selected_drybulk_terminals, 773 # time_end_3_weeks, time_end_1_month, port_berth_drybulk_terminals, terminal_data, 0.1)) 774 775 # for the first week only 50% of trucks/trains arrive and for the second week only 80% of trucks/trains arrive 776 777 truck_time_arrival_dict = { 778 (time_start, time_start + disruption_duration): 0.0, # no trucks during hurricane 779 (time_end_1_week, time_end_2_week): 0.1, 780 (time_end_2_week, time_end_3_weeks): 0.3, 781 (time_end_3_weeks, time_end_4_weeks): 0.5 782 } 783 784 train_time_arrival_dict = { 785 (time_start, time_start + disruption_duration): 0.0, # no trains during hurricane 786 (time_end_1_week, time_end_2_week): 0.0, 787 (time_end_2_week, time_end_3_weeks): 0.5 788 } 789 790 filter_truck_data(run_id, truck_time_arrival_dict) 791 filter_train_data(run_id, train_time_arrival_dict) 792 793 elif TROPICAL_STORM == True and SEVERE_HURRICANE == False: 794 795 time_start = HURRICANE_START 796 disruption_duration = 24*4 # 4 days 797 798 time_end_1_week = HURRICANE_START + 24*7 # 7 days 799 time_end_2_week = HURRICANE_START + 24*14 # 14 days 800 801 # No damage to port facilities 802 803 # No vessel arrivals for 1 week 804 stop_and_bulk_arrival(time_start, time_start + disruption_duration, run_id, plot_input=True) 805 806 # for the first week only 70% of trucks arrive and no change to trains 807 truck_time_arrival_dict = { 808 (time_start, 
time_end_1_week): 0.5, 809 (time_end_1_week, time_end_2_week): 0.8 810 } 811 812 filter_truck_data(run_id, truck_time_arrival_dict) 813 814 elif SEVERE_HURRICANE == True and TROPICAL_STORM == True: 815 print("Error: Both SEVERE_HURRICANE and TROPICAL_STORM flags cannot be True at the same time; update the config file.") 816 817 818def disruption_report_smoothed(smooth_window_points: int = 30, plot_legend: bool = False) -> str: 819 """ 820 Inputs: 821 - smooth_window_points: window size (odd integer) for rolling mean smoothing of raw queue 822 Outputs (returned as a formatted string): 823 1) times: onset, queue_in_point, mobilisation, peak, recovery 824 2) a blank line 825 3) durations: onset→queue_in_point, queue_in_point→mobilisation, mobilisation→peak, peak→recovery 826 4) a blank line 827 5) areas: positive (above baseline), negative (below baseline) 828 Note: All event times are identified on RAW data, but areas are computed using SMOOTHED data. 829 830 """ 831 warmup_time = WARMUP_ITERS 832 onset_time = HURRICANE_START 833 834 logfileid = f"{int(NUM_MONTHS)}_months_{NUM_RUNS}_runs_run_{LOG_NUM}" 835 df_location = f'collatedResults/{logfileid}/data/all_queue.csv' 836 837 df_raw = pd.read_csv(df_location) 838 df_raw = df_raw.rename(columns={df_raw.columns[0]: "time"}) 839 df_raw["mean"] = df_raw.iloc[:, 1:].mean(axis=1) 840 841 time_col = "time" 842 queue_col = "mean" 843 df = df_raw[[time_col, queue_col]].rename(columns={time_col: "time_hr", queue_col: "q_raw"}) 844 845 df = df.sort_values("time_hr").reset_index(drop=True) 846 df["time_hr"] = pd.to_numeric(df["time_hr"], errors="coerce") 847 df["q_raw"] = pd.to_numeric(df["q_raw"], errors="coerce") 848 df = df.dropna(subset=["time_hr", "q_raw"]).reset_index(drop=True) 849 850 # Smoothing for areas only 851 w = int(smooth_window_points) 852 if w < 3: w = 3 853 if w % 2 == 0: w += 1 854 df["q_smooth"] = df["q_raw"].rolling(window=w, center=True, min_periods=1).mean() 855 856 # Baseline from [1000, 1500] on RAW 
    t0, t1 = warmup_time, onset_time
    base_mask = (df["time_hr"] >= t0) & (df["time_hr"] <= t1)
    baseline = float(df.loc[base_mask, "q_raw"].mean())

    # A: onset -- sample closest in time to the configured hurricane start.
    idx_A = int((df["time_hr"] - onset_time).abs().idxmin())
    A_t = float(df.at[idx_A, "time_hr"]); A_q = float(df.at[idx_A, "q_raw"])

    # t_queue_in_point: first point after onset where the queue starts to
    # increase (two consecutive positive slopes on the raw series).
    dQdt = df["q_raw"].diff() / df["time_hr"].diff()
    idx_queue_in = None
    for i in range(idx_A + 1, len(df) - 2):
        if (dQdt.iloc[i] > 0) and (dQdt.iloc[i + 1] > 0):
            idx_queue_in = i; break
    if idx_queue_in is None: idx_queue_in = min(idx_A + 1, len(df) - 1)
    t_queue_in = float(df.at[idx_queue_in, "time_hr"]); q_queue_in = float(df.at[idx_queue_in, "q_raw"])

    # B: mobilisation (first upward crossing to baseline after t_queue_in_point)
    idx_B = None
    for i in range(idx_queue_in, len(df)):
        if df.at[i, "q_raw"] >= baseline:
            idx_B = i; break
    if idx_B is None: idx_B = len(df) - 1
    B_t = float(df.at[idx_B, "time_hr"]); B_q = baseline

    # C: peak (raw max after mobilisation)
    idx_C = int(df.loc[df["time_hr"] >= B_t, "q_raw"].idxmax())
    C_t = float(df.at[idx_C, "time_hr"]); C_q = float(df.at[idx_C, "q_raw"])

    # (Disabled) D: recovery as the first downward crossing of the baseline
    # after C, with linear interpolation on RAW data.
    # D_t, D_q = None, None
    # for i in range(idx_C, len(df) - 1):
    #     y0, y1 = df.at[i, "q_raw"], df.at[i + 1, "q_raw"]
    #     x0, x1 = df.at[i, "time_hr"], df.at[i + 1, "time_hr"]
    #     if (y0 > baseline) and (y1 <= baseline):
    #         D_t = float(x0 + (baseline - y0) * (x1 - x0) / (y1 - y0)) if y1 != y0 else float(x1)
    #         D_q = baseline
    #         break
    # if D_t is None:
    #     D_t = float(df.at[len(df) - 1, "time_hr"]); D_q = float(df.at[len(df) - 1, "q_raw"])

    # D: recovery (average queue over 2 weeks post-recovery point is <= baseline)
    two_weeks_in_hours = 14 * 24
    D_t, D_q = None, None
    for i in range(idx_C, len(df)):
        # Calculate the end point of the 2-week window
        end_time = df.at[i, "time_hr"] + two_weeks_in_hours

        # Get the subset of data for the 2-week window
        window_mask = (df["time_hr"] >= df.at[i, "time_hr"]) & (df["time_hr"] <= end_time)
        window_data = df.loc[window_mask, "q_raw"]

        # If there's enough data for a full 2-week window, check the average
        if len(window_data) >= 1:  # Ensure there is at least one data point
            avg_queue = window_data.mean()
            if avg_queue <= baseline:
                D_t = float(df.at[i, "time_hr"])
                D_q = float(df.at[i, "q_raw"])
                break

    # Fallback in case no such point is found
    if D_t is None:
        D_t = float(df.at[len(df) - 1, "time_hr"]); D_q = float(df.at[len(df) - 1, "q_raw"])

    # Durations between the event times (hours)
    dur_A_queue_in = t_queue_in - A_t
    dur_queue_in_B = B_t - t_queue_in
    dur_B_C = C_t - B_t
    dur_C_D = D_t - C_t

    # Areas between A and D using q_smooth (trapezoidal integration)
    mask_AD = (df["time_hr"] >= A_t) & (df["time_hr"] <= D_t)
    xs = df.loc[mask_AD, "time_hr"].to_numpy(dtype=float)
    ys_sm = df.loc[mask_AD, "q_smooth"].to_numpy(dtype=float)
    if xs.size == 0 or xs[-1] < D_t:
        # Close the integration interval at D with the baseline value.
        xs = np.append(xs, D_t); ys_sm = np.append(ys_sm, baseline)
    area_pos = float(np.trapz(np.maximum(0.0, ys_sm - baseline), xs))
    area_neg = float(np.trapz(np.maximum(0.0, baseline - ys_sm), xs))

    # Calculate new metrics
    # Robustness (R): R = Q(t_min) / Q_0_bar
    idx_min = int(df.loc[(df["time_hr"] >= A_t) & (df["time_hr"] <= t_queue_in), "q_raw"].idxmin())
    q_min = df.at[idx_min, "q_raw"]
    robustness = q_min / baseline

    # Mobilization (M): M = time from onset to mobilisation
    mobilization = B_t - A_t

    # Peak Queue Increase (P): P = (Q(t_max) - Q_0_bar) / Q_0_bar
    peak_increase = (C_q - baseline) / baseline

    # Max Recovery Rate (m_max): steepest (most negative) slope from the peak
    # to any of the next 5..50 points.
    max_negative_rate = 0.0
    best_lookahead = 0
    best_lookahead_idx = idx_C

    # Loop through lookahead values from 5 to 50
    for recovery_rate_lookahead in range(5, 51):
        idx_C_plus_n = min(idx_C + recovery_rate_lookahead, len(df) - 1)
        if idx_C_plus_n > idx_C:
            rate = (df.at[idx_C_plus_n, "q_raw"] - C_q) / (df.at[idx_C_plus_n, "time_hr"] - C_t)
        else:
            rate = 0.0

        # Keep the most negative (steepest downward) slope seen so far
        if rate < max_negative_rate:
            max_negative_rate = rate
            best_lookahead = recovery_rate_lookahead
            best_lookahead_idx = idx_C_plus_n

    # Drop last 200 points to avoid edge effects in smoothing
    df = df.iloc[:-200, :].reset_index(drop=True)

    # Plot (PDF): raw & smoothed; shading from smoothed
    logfileid = f"{int(NUM_MONTHS)}_months_{NUM_RUNS}_runs_run_{LOG_NUM}"
    pdf_path = f'collatedResults/{logfileid}/hurricane_report.pdf'
    plt.figure(figsize=(16, 6.5))
    plt.plot(df["time_hr"], df["q_raw"], label="Queue (raw)", alpha=0.5)
    plt.plot(df["time_hr"], df["q_smooth"], label=f"Queue (smoothed)", linewidth=2)
    plt.axhline(baseline, linestyle="--", label="Baseline")
    plt.fill_between(xs, ys_sm, baseline, where=(ys_sm < baseline), alpha=0.3, label="Lost service", color="orange")
    plt.fill_between(xs, ys_sm, baseline, where=(ys_sm > baseline), alpha=0.2, label="Excess queuing", color="pink")

    # Shade the warmup period
    plt.axvspan(0, warmup_time, color='gray', alpha=0.2, label="Warmup period")

    # Plot the max recovery rate line segment
    x_rate = [C_t, df.at[best_lookahead_idx, "time_hr"]]
    y_rate = [C_q, df.at[best_lookahead_idx, "q_raw"]]
    plt.plot(x_rate, y_rate, 'g--', label="Max recovery rate", linewidth=2)

    for (xt, yv), lbl in [((A_t, A_q), "Onset"), ((B_t, B_q), "Mobilisation"),
                          ((C_t, C_q), "Peak"), ((D_t, D_q), "Recovery")]:
        plt.scatter([xt], [yv], s=64, label=lbl)
    plt.xlabel("Time (hr)", fontsize=16); plt.ylabel("Queue length", fontsize=16); plt.tight_layout()
    if plot_legend:
        plt.legend(fontsize=14)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.savefig(pdf_path, dpi=200, bbox_inches="tight")
    plt.show()

    lines = [
        f"onset: {A_t:.3f}",
        f"queue-in-point: {t_queue_in:.3f}",
        f"mobilisation: {B_t:.3f}",
        f"peak: {C_t:.3f}",
        f"recovery: {D_t:.3f}",
        "",
        f"time onset to queue-in-point (days): {dur_A_queue_in / 24:.1f}",
        f"time queue-in-point to mobilisation (days): {dur_queue_in_B / 24:.1f}",
        f"time mobilisation to peak (days): {dur_B_C / 24:.1f}",
        f"time peak to recovery (days): {dur_C_D / 24:.1f}",
        "",
        f"area above baseline (vessel days): {area_pos / 24:.2f}",
        f"area below baseline (vessel days): {area_neg / 24:.2f}",
        "",
        f"Robustness: {robustness:.2f}",
        f"Mobilization: {mobilization:.2f}",
        f"Peak Queue Increase: {peak_increase:.2f}",
        f"Max Recovery Rate: {max_negative_rate:.2f}"
    ]

    # Persist the text report and echo it to stdout.
    with open(f'collatedResults/{logfileid}/hurricane_report.txt', 'w') as f:
        for line in lines:
            f.write(line + "\n")
            print(line)

    return "\n".join(lines), str(pdf_path)


def fog_peak_slopes(slope_steps: int = 5, baseline_end: float = 2*7*14):
    """
    - Shades inbound/outbound closure windows.
    - For each pair i, finds the raw peak within the union window [min(in_i, out_i), max(in_i, out_i)].
    - Computes slope from peak -> next `slope_steps` points: (q[t+k]-q[t])/(time[t+k]-time[t]).
    - Produces TWO plots:
        1) Full view: queue with inbound/outbound shaded (no slope segments).
        2) Zoomed to [first_closure_start, last_closure_end]: queue with closures shaded AND slope segments.
    - Prints a compact table (no CSV) with: i, peak_time, peak_queue, slope_peak_to_nextK.

    Uses default Matplotlib colors; markers are circles; no text annotations.

    Returns:
        (pdf_full_path, pdf_zoom_path) as strings.
1041 """ 1042 # ---- Load data ---- 1043 warmup_time = WARMUP_ITERS 1044 inbound_closed = INBOUND_CLOSED 1045 outbound_closed = OUTBOUND_CLOSED 1046 1047 logfileid = f"{int(NUM_MONTHS)}_months_{NUM_RUNS}_runs_run_{LOG_NUM}" 1048 df_location = f'collatedResults/{logfileid}/data/all_queue.csv' 1049 1050 df_raw = pd.read_csv(df_location) 1051 df_raw = df_raw.rename(columns={df_raw.columns[0]: "time"}) 1052 df_raw["mean"] = df_raw.iloc[:, 1:].mean(axis=1) 1053 1054 1055 time_col = "time" 1056 mean_col = "mean" 1057 df = df_raw[[time_col, mean_col]].rename(columns={time_col:"time_hr", mean_col:"q_raw"}) 1058 1059 df = df.sort_values("time_hr").reset_index(drop=True) 1060 df["time_hr"] = pd.to_numeric(df["time_hr"], errors="coerce") 1061 df["q_raw"] = pd.to_numeric(df["q_raw"], errors="coerce") 1062 df = df.dropna(subset=["time_hr","q_raw"]).reset_index(drop=True) 1063 1064 # Baseline from 1000 to 1500 (raw) 1065 base_mask = (df["time_hr"] >= warmup_time) & (df["time_hr"] <= baseline_end) 1066 baseline = float(df.loc[base_mask, "q_raw"].mean()) 1067 1068 # ---- Per-pair peak and slope to next K steps ---- 1069 results = [] 1070 n = min(len(inbound_closed), len(outbound_closed)) 1071 for i in range(n): 1072 a0, a1 = inbound_closed[i] 1073 b0, b1 = outbound_closed[i] 1074 win_start = min(a0, b0) 1075 win_end = max(a1, b1) 1076 mask_win = (df["time_hr"] >= win_start) & (df["time_hr"] <= win_end) 1077 if mask_win.sum() == 0: 1078 continue 1079 idx_peak = int(df.loc[mask_win, "q_raw"].idxmax()) 1080 t_peak = float(df.at[idx_peak, "time_hr"]) 1081 q_peak = float(df.at[idx_peak, "q_raw"]) 1082 1083 # Check if the queue decreases after the peak 1084 idx_k = min(idx_peak + max(1, int(slope_steps)), len(df)-1) 1085 t_k = float(df.at[idx_k, "time_hr"]) 1086 q_k = float(df.at[idx_k, "q_raw"]) 1087 1088 # Only calculate and append the result if the queue decreases 1089 if q_k <= q_peak: 1090 slope = (q_k - q_peak)/(t_k - t_peak) if (t_k - t_peak) != 0 else np.nan 1091 
results.append({ 1092 "i": i+1, 1093 "peak_time": t_peak, 1094 "peak_queue": q_peak, 1095 "nextK_time": t_k, 1096 "nextK_queue": q_k, 1097 "slope_peak_to_nextK": slope 1098 }) 1099 1100 # ---- PLOTS ---- 1101 # 1) Full view: open/closed only 1102 pdf_full = f'collatedResults/{logfileid}/fog_report_full.pdf' 1103 export_prefix = f'collatedResults/{logfileid}/fog_report' 1104 1105 plt.figure(figsize=(14,7)) 1106 plt.plot(df["time_hr"], df["q_raw"], label="Queue") 1107 plt.axhline(baseline, linestyle="--", label="Baseline") 1108 for (s,e) in inbound_closed: 1109 plt.axvspan(s, e, alpha=0.18, color= 'green', label="Inbound closed" if (s,e)==inbound_closed[0] else None) 1110 for (s,e) in outbound_closed: 1111 plt.axvspan(s, e, alpha=0.18, color = 'orange', label="Outbound closed" if (s,e)==outbound_closed[0] else None) 1112 plt.xlabel("Time (hr)", fontsize=16); plt.ylabel("Queue length", fontsize=16); plt.legend(loc="best", fontsize=16) 1113 plt.xticks(fontsize=14); plt.yticks(fontsize=14) 1114 plt.tight_layout(); plt.savefig(pdf_full, dpi=200, bbox_inches="tight"); plt.show(); plt.close() 1115 1116 # 2) Zoomed: show slopes too 1117 pdf_zoom = Path(f"{export_prefix}_zoom.pdf") 1118 fog_start = min(min(x[0] for x in inbound_closed), min(x[0] for x in outbound_closed)) 1119 fog_end = max(max(x[1] for x in inbound_closed), max(x[1] for x in outbound_closed)) 1120 m = (df["time_hr"] >= fog_start) & (df["time_hr"] <= fog_end) 1121 plt.figure(figsize=(14,7)) 1122 plt.plot(df.loc[m, "time_hr"], df.loc[m, "q_raw"], label="Queue") 1123 plt.axhline(baseline, linestyle="--", label="Baseline") 1124 for (s,e) in inbound_closed: 1125 if e>=fog_start and s<=fog_end: 1126 plt.axvspan(s, e, alpha=0.18, color = 'green', label="Inbound closed" if (s,e)==inbound_closed[0] else None) 1127 for (s,e) in outbound_closed: 1128 if e>=fog_start and s<=fog_end: 1129 plt.axvspan(s, e, alpha=0.18, color = 'orange', label="Outbound closed" if (s,e)==outbound_closed[0] else None) 1130 # circle markers 
+ slope segments 1131 for r in results: 1132 if fog_start <= r["peak_time"] <= fog_end: 1133 plt.plot([r["peak_time"]], [r["peak_queue"]], marker='o', linestyle='None', color='green', markersize=8) 1134 plt.plot([r["peak_time"], r["nextK_time"]], 1135 [r["peak_queue"], r["nextK_queue"]], 1136 linestyle='--', linewidth=3, color = 'k') 1137 plt.xlabel("Time (hr)",fontsize=16); plt.ylabel("Queue length",fontsize=16) 1138 plt.legend( 1139 loc="upper center", # anchor relative to top center 1140 bbox_to_anchor=(0.5, -0.1), # shift it below the axes 1141 fontsize=16, 1142 ncol=4 # optional: spread entries into multiple columns 1143 ) 1144 plt.xticks(fontsize=14); plt.yticks(fontsize=14) 1145 plt.tight_layout(); plt.savefig(pdf_zoom, dpi=200, bbox_inches="tight"); plt.show(); plt.close() 1146 1147 # ---- PRINT TABLE ---- 1148 print("i, peak_time, peak_queue, slope_peak_to_nextK") 1149 for r in results: 1150 print(f"{r['i']}, {r['peak_time']:.3f}, {r['peak_queue']:.3f}, {r['slope_peak_to_nextK']:.6f}") 1151 print("\nAverage slope is", np.mean([r["slope_peak_to_nextK"] for r in results])) 1152 1153 # save results as a text file 1154 with open(f'collatedResults/{logfileid}/fog_report.txt', 'w') as f: 1155 f.write("i, peak_time, peak_queue, slope_peak_to_nextK\n") 1156 for r in results: 1157 f.write(f"{r['i']}, {r['peak_time']:.3f}, {r['peak_queue']:.3f}, {r['slope_peak_to_nextK']:.6f}\n") 1158 1159 return str(pdf_full), str(pdf_zoom) 1160 1161 1162 1163
def reduce_cranes(env, change, terminals, time_start, time_end, port_berths_container_terminals, terminal_data_cache, berths_aff_per):
    """
    Reduce the number of cranes per berth by `abs(change)` in the affected
    berths of the given container terminals between `time_start` and
    `time_end`, then restore the removed cranes afterwards.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Number of cranes to remove (negative value); non-negative
            values leave the cranes untouched.
        terminals (list): 0-based indices of the container terminals affected.
        time_start (int): Simulation time at which the reduction starts.
        time_end (int): Simulation time at which the reduction ends.
        port_berths_container_terminals (dict): Berth stores per container terminal.
        terminal_data_cache (dict): Terminal data; unused here, kept for
            interface compatibility with the callers.
        berths_aff_per (float): Fraction (0-1) of each terminal's berths affected.
    Returns:
        None (simpy process generator).
    """
    # Snapshot the berths currently idle in each terminal's store; berths that
    # are checked out (in use) at this moment are not captured.
    port_berth_initially = {}
    for terminal_idx in terminals:
        port_berth_initially[terminal_idx] = list(port_berths_container_terminals[terminal_idx].items)

    print(
        f"Modelling disruption: Attempting to remove {-change} cranes from each berth in terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # Remove cranes and remember exactly which crane left which berth, so the
    # restoration phase puts back precisely what was taken. (The original
    # implementation discarded the removed cranes and unconditionally created
    # abs(change) new ones per affected berth, which could inflate the crane
    # count when fewer cranes were actually available for removal.)
    removed = []  # list of (berth, crane) pairs
    for terminal_idx in terminals:
        num_berths = len(port_berth_initially[terminal_idx])
        berths_aff = int(berths_aff_per * num_berths)
        count = 0
        for berth in port_berth_initially[terminal_idx]:
            # Quota check before processing: berths_aff == 0 now affects no
            # berth (the original still disrupted the first berth).
            if count >= berths_aff:
                break
            count += 1
            if change < 0:
                num_to_remove = min(len(berth.cranes.items), abs(change))
                for _ in range(num_to_remove):
                    if berth.cranes.items:
                        crane = yield berth.cranes.get()
                        removed.append((berth, crane))

    yield env.timeout(time_end - time_start)

    # Restore exactly the cranes that were removed, to their original berths.
    for berth, crane in removed:
        yield berth.cranes.put(crane)
This function reduces the number of cranes in each berth by change amount by removing cranes from the berth's crane store between time_start and time_end.
It also restores the cranes after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of cranes to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the cranes will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_berths_container_terminals (dict): Dictionary containing berth information for container terminals.
- terminal_data_cache (dict): Cache containing terminal data for crane transfer rates.
- berths_aff_per (float): Fraction (between 0 and 1, not a percentage) of berths to be affected by the change.
Returns:
None
def reduce_pipelines(env, change, terminals, time_start, time_end, port_berth_liquid_terminals, terminal_data_cache, berths_aff_per):
    """
    Reduce the number of pipelines per berth by `abs(change)` in the affected
    berths of the given liquid terminals between `time_start` and `time_end`,
    then restore the removed pipelines afterwards.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Number of pipelines to remove (negative value); non-negative
            values leave the pipelines untouched.
        terminals (list): 0-based indices of the liquid terminals affected
            (same convention as reduce_cranes / reduce_conveyors; callers pass
            samples of range(num_liquid_terminals)).
        time_start (int): Simulation time at which the reduction starts.
        time_end (int): Simulation time at which the reduction ends.
        port_berth_liquid_terminals (dict): Berth stores per liquid terminal.
        terminal_data_cache (dict): Terminal data; unused here, kept for
            interface compatibility with the callers.
        berths_aff_per (float): Fraction (0-1) of each terminal's berths affected.
    Returns:
        None (simpy process generator).
    """
    # NOTE(review): the original implementation indexed the berth stores with
    # `terminal - 1` (mapping index 0 to -1) and looked up terminal data with
    # `terminal + 1` + 1, unlike its siblings reduce_cranes/reduce_conveyors
    # and unlike the 0-based indices the callers generate. Indices are now
    # used directly, consistent with the rest of the module.
    port_berth_initially = {}
    for terminal_idx in terminals:
        port_berth_initially[terminal_idx] = list(port_berth_liquid_terminals[terminal_idx].items)

    print(
        f"Modelling disruption: Attempting to remove {-change} pipelines from each berth in terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # Remove pipelines, remembering which pipeline left which berth so the
    # restoration phase is exactly symmetric (the original counted removed
    # pipelines during removal but berths during restoration, and created
    # brand-new Pipeline objects regardless of how many were removed).
    removed = []  # list of (berth, pipeline) pairs
    for terminal_idx in terminals:
        num_berths = len(port_berth_initially[terminal_idx])
        berths_aff = int(berths_aff_per * num_berths)
        count = 0  # berths affected so far in this terminal
        for berth in port_berth_initially[terminal_idx]:
            if count >= berths_aff:
                break
            count += 1
            if change < 0:
                num_to_remove = min(len(berth.pipelines.items), abs(change))
                for _ in range(num_to_remove):
                    if berth.pipelines.items:
                        pipeline = yield berth.pipelines.get()
                        removed.append((berth, pipeline))

    yield env.timeout(time_end - time_start)

    # Restore exactly the pipelines that were removed, to their original berths.
    for berth, pipeline in removed:
        yield berth.pipelines.put(pipeline)
This function reduces the number of pipelines in each berth by change amount by removing pipelines from the berth's pipeline store between time_start and time_end.
It also restores the pipelines after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of pipelines to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the pipelines will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_berth_liquid_terminals (dict): Dictionary containing berth information for liquid terminals.
- terminal_data_cache (dict): Cache containing terminal data for pipeline transfer rates.
- berths_aff_per (float): Fraction (between 0 and 1, not a percentage) of berths to be affected by the change.
Returns:
None
def reduce_conveyors(env, change, terminals, time_start, time_end, port_berth_drybulk_terminals, terminal_data_cache, berths_aff_per):
    """
    Reduce the number of conveyors per berth by `abs(change)` in the affected
    berths of the given dry bulk terminals between `time_start` and
    `time_end`, then restore the removed conveyors afterwards.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Number of conveyors to remove (negative value); non-negative
            values leave the conveyors untouched.
        terminals (list): 0-based indices of the dry bulk terminals affected.
        time_start (int): Simulation time at which the reduction starts.
        time_end (int): Simulation time at which the reduction ends.
        port_berth_drybulk_terminals (dict): Berth stores per dry bulk terminal.
        terminal_data_cache (dict): Terminal data; unused here, kept for
            interface compatibility with the callers.
        berths_aff_per (float): Fraction (0-1) of each terminal's berths affected.
    Returns:
        None (simpy process generator).
    """
    # Snapshot the berths currently idle in each terminal's store.
    port_berth_initially = {}
    for terminal_idx in terminals:
        port_berth_initially[terminal_idx] = list(port_berth_drybulk_terminals[terminal_idx].items)

    print(
        f"Modelling disruption: Attempting to remove {-change} conveyor from each berth in terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # Remove conveyors and remember which conveyor left which berth so the
    # restoration phase puts back precisely what was taken. (The original
    # discarded removed conveyors and unconditionally created abs(change) new
    # ones per affected berth, which could inflate the conveyor count.)
    removed = []  # list of (berth, conveyor) pairs
    for terminal_idx in terminals:
        num_berths = len(port_berth_initially[terminal_idx])
        berths_aff = int(berths_aff_per * num_berths)
        count = 0
        for berth in port_berth_initially[terminal_idx]:
            # Quota check before processing so berths_aff == 0 affects no berth.
            if count >= berths_aff:
                break
            count += 1
            if change < 0:
                num_to_remove = min(len(berth.conveyors.items), abs(change))
                for _ in range(num_to_remove):
                    if berth.conveyors.items:
                        conveyor = yield berth.conveyors.get()
                        removed.append((berth, conveyor))

    yield env.timeout(time_end - time_start)

    # Restore exactly the conveyors that were removed, to their original berths.
    for berth, conveyor in removed:
        yield berth.conveyors.put(conveyor)
This function reduces the number of conveyors in each berth by change amount by removing conveyors from the berth's conveyor store between time_start and time_end.
It also restores the conveyors after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of conveyors to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the conveyors will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_berth_drybulk_terminals (dict): Dictionary containing berth information for dry bulk terminals.
- terminal_data_cache (dict): Cache containing terminal data for conveyor transfer rates.
- berths_aff_per (float): Fraction (between 0 and 1, not a percentage) of berths to be affected by the change.
Returns:
None
199def reduce_berths(env, change, terminals, time_start, time_end, port_berths_terminal): 200 """ 201 This function reduces the number of berths in each terminal by `change` amount by removing berths from the terminal's berth store between `time_start` and `time_end`. 202 It also restores the berths after the reduction period. 203 Args: 204 env (simpy.Environment): The simulation environment. 205 change (int): The number of berths to be removed (negative value) or added (positive value). 206 terminals (list): List of terminal indices where the berths will be reduced. 207 time_start (int): The time at which the reduction starts. 208 time_end (int): The time at which the reduction ends. 209 port_berths_terminal (dict): Dictionary containing berth information for the terminals. 210 Returns: 211 None 212 """ 213 print( 214 f"Modelling disruption: Attempting to remove {-change} berths from terminals {terminals} from time {time_start} to {time_end}") 215 216 yield env.timeout(time_start) 217 218 removed_berths = {terminal: [] for terminal in terminals} 219 220 # Reduce capacity by taking items out of the store 221 for terminal in terminals: 222 for _ in range(abs(change)): 223 # Remove a berth to simulate reduced capacity 224 berth = yield port_berths_terminal[terminal].get() 225 removed_berths[terminal].append(berth) 226 print( 227 f"Terminal {terminal}: Berth capacity effectively reduced by {abs(change)} at time {env.now}") 228 229 yield env.timeout(time_end - time_start) 230 231 # Restore the removed berths to the store 232 for terminal in terminals: 233 for berth in removed_berths[terminal]: 234 yield port_berths_terminal[terminal].put(berth) 235 print( 236 f"Terminal {terminal}: Berth Capacity restored by {abs(change)} at time {env.now}")
This function reduces the number of berths in each terminal by change amount by removing berths from the terminal's berth store between time_start and time_end.
It also restores the berths after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of berths to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the berths will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_berths_terminal (dict): Dictionary containing berth information for the terminals.
Returns:
None
def reduce_yard(env, change, terminals, time_start, time_end, port_yard_container_terminals):
    """
    Temporarily reduce yard capacity by inserting dummy containers.

    Puts `abs(change)` dummy items into each terminal's yard store at
    `time_start` (occupying yard slots so real containers have less room)
    and removes the same number of items again at `time_end`.

    Fixes two bugs in the previous version: `put()` was called with no
    argument (a TypeError in simpy), and the restore loop called `.get()`
    on the Python list of dummies instead of on the yard store.

    NOTE(review): with a plain simpy Store, get() is FIFO and may pull a
    real container rather than a specific dummy; a FilterStore would allow
    targeted removal — confirm the store type used by callers.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Number of yard slots to block (sign is ignored).
        terminals (list): Terminal indices affected by the disruption.
        time_start (int): Simulation time at which the reduction begins.
        time_end (int): Simulation time at which capacity is restored.
        port_yard_container_terminals (dict): Maps terminal index -> yard store.
    Returns:
        None (simpy process generator).
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} units yard space from terminals {terminals} from time {time_start} to {time_end} (Adding dummy containers)")

    yield env.timeout(time_start)

    dummy_containers = {terminal: [] for terminal in terminals}

    # Occupy yard slots with dummy items to lower the effective capacity.
    for terminal in terminals:
        yard = port_yard_container_terminals[terminal]
        for i in range(abs(change)):
            blocker = f"dummy_yard_blocker_{terminal}_{i}"
            yield yard.put(blocker)
            dummy_containers[terminal].append(blocker)
        print(
            f"Terminal {terminal}: Yard capacity effectively reduced by {abs(change)} ctrs at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Free the same number of slots to restore capacity.
    for terminal in terminals:
        yard = port_yard_container_terminals[terminal]
        for _ in dummy_containers[terminal]:
            yield yard.get()
        print(
            f"Terminal {terminal}: Yard capacity restored by {abs(change)} at time {env.now}")
This function reduces the yard capacity in each terminal by change amount by adding dummy containers to the terminal's yard store between time_start and time_end.
It also restores the yard capacity after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of yard spaces to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the yard capacity will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_yard_container_terminals (dict): Dictionary containing yard information for the terminals.
Returns:
None
def reduce_tank_capacity(env, change, terminals, time_start, time_end, port_tanks_liquid_terminals):
    """
    Temporarily reduce tank capacity by adding dummy liquid.

    Puts `abs(change)` units of dummy liquid into each terminal's tank at
    `time_start` and drains the same amount at `time_end`.

    Fix: the previous version passed the signed (negative) `change` straight
    to Container.put()/get(); simpy requires a positive amount, so the
    magnitude is used instead.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Units of tank space to block (sign is ignored).
        terminals (list): Terminal indices affected by the disruption.
        time_start (int): Simulation time at which the reduction begins.
        time_end (int): Simulation time at which capacity is restored.
        port_tanks_liquid_terminals (dict): Maps terminal index -> tank container.
    Returns:
        None (simpy process generator).
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} units tank space from terminals {terminals} from time {time_start} to {time_end} (Adding dummy liquid)")

    yield env.timeout(time_start)

    # Fill part of each tank with dummy liquid; put() needs a positive amount.
    for terminal in terminals:
        yield port_tanks_liquid_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Tank capacity effectively reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Drain the dummy liquid to restore the original capacity.
    for terminal in terminals:
        yield port_tanks_liquid_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Tank capacity effectively increased by {abs(change)} at time {env.now}")
This function reduces the tank capacity in each terminal by change amount by adding dummy liquid to the terminal's tank store between time_start and time_end.
It also restores the tank capacity after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of tank spaces to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the tank capacity will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_tanks_liquid_terminals (dict): Dictionary containing tank information for the terminals.
Returns:
None
def reduce_silo_capacity(env, change, terminals, time_start, time_end, port_silos_drybulk_terminals):
    """
    Temporarily reduce silo capacity by adding dummy dry bulk.

    Puts `abs(change)` units of dummy dry bulk into each terminal's silo at
    `time_start` and removes the same amount at `time_end`.

    Fix: the previous version passed the signed (negative) `change` straight
    to Container.put()/get(); simpy requires a positive amount, so the
    magnitude is used instead.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Units of silo space to block (sign is ignored).
        terminals (list): Terminal indices affected by the disruption.
        time_start (int): Simulation time at which the reduction begins.
        time_end (int): Simulation time at which capacity is restored.
        port_silos_drybulk_terminals (dict): Maps terminal index -> silo container.
    Returns:
        None (simpy process generator).
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} units silo space from terminals {terminals} from time {time_start} to {time_end} (Adding dummy dry bulk)")

    yield env.timeout(time_start)

    # Fill part of each silo with dummy bulk; put() needs a positive amount.
    for terminal in terminals:
        yield port_silos_drybulk_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Silo capacity effectively reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Remove the dummy bulk to restore the original capacity.
    for terminal in terminals:
        yield port_silos_drybulk_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Silo capacity effectively increased by {abs(change)} at time {env.now}")
This function reduces the silo capacity in each terminal by change amount by adding dummy dry bulk to the terminal's silo store between time_start and time_end.
It also restores the silo capacity after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of silo spaces to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the silo capacity will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_silos_drybulk_terminals (dict): Dictionary containing silo information for the terminals.
Returns:
None
def reduce_bay_capacity(env, reduction, terminals, start_time, end_time, port_loading_bays_liquid_terminals):
    """
    Temporarily take loading bays out of service at the given terminals.

    Holds `abs(reduction)` bay requests per terminal between `start_time`
    and `end_time`, then releases them to restore full capacity.

    Fix: the previous `range(reduction)` iterated zero times for the
    documented negative `reduction`, silently skipping the disruption;
    the magnitude is used now.

    Args:
        env (simpy.Environment): The simulation environment.
        reduction (int): Number of bays to take offline (sign is ignored).
        terminals (list): Terminal indices affected by the disruption.
        start_time (int): Simulation time at which the reduction begins.
        end_time (int): Simulation time at which capacity is restored.
        port_loading_bays_liquid_terminals (dict): Maps terminal index -> bay resource.
    Returns:
        None (simpy process generator).
    """
    print(
        f"Modelling disruption: Attempting to remove {-reduction} bays from terminals {terminals} from time {start_time} to {end_time}")

    yield env.timeout(start_time)

    # Hold requests so the bays appear busy for the disruption window.
    requests = []
    for terminal in terminals:
        for _ in range(abs(reduction)):
            request = port_loading_bays_liquid_terminals[terminal].request()
            yield request  # Hold this request to reduce availability
            requests.append((terminal, request))
        print(
            f"{abs(reduction)} units of bays removed from terminal {terminal} at {start_time}")
    yield env.timeout(end_time - start_time)

    # Release the held requests to restore full capacity.
    for terminal, request in requests:
        port_loading_bays_liquid_terminals[terminal].release(request)
        print(
            f"{abs(reduction)} units of bays restored from terminal {terminal} at {end_time}")
This function reduces the bay capacity in each terminal by reduction amount by holding requests for bays in the terminal's bay store between start_time and end_time.
It also restores the bay capacity after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- reduction (int): The number of bays to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the bay capacity will be reduced.
- start_time (int): The time at which the reduction starts.
- end_time (int): The time at which the reduction ends.
- port_loading_bays_liquid_terminals (dict): Dictionary containing bay information for the terminals.
Returns:
None
def reduce_chassis(env, change, terminals, time_start, time_end, port_chassis_container_terminals):
    """
    Temporarily reduce the chassis pool at the given terminals.

    Removes `abs(change)` chassis from each terminal's chassis container at
    `time_start` and returns the same amount at `time_end`.

    Fix: the previous version passed the signed (negative) `change` straight
    to get()/put(); simpy requires a positive amount, so the magnitude is
    used instead.

    Args:
        env (simpy.Environment): The simulation environment.
        change (int): Number of chassis to take offline (sign is ignored).
        terminals (list): Terminal indices affected by the disruption.
        time_start (int): Simulation time at which the reduction begins.
        time_end (int): Simulation time at which capacity is restored.
        port_chassis_container_terminals (dict): Maps terminal index -> chassis container.
    Returns:
        None (simpy process generator).
    """
    print(
        f"Modelling disruption: Attempting to remove {-change} chassis from terminals {terminals} from time {time_start} to {time_end}")

    yield env.timeout(time_start)

    # Withdraw chassis from the pool; get() needs a positive amount.
    for terminal in terminals:
        yield port_chassis_container_terminals[terminal].get(abs(change))
        print(
            f"Terminal {terminal}: Number of chassis reduced by {abs(change)} at time {env.now}")

    yield env.timeout(time_end - time_start)

    # Return the withdrawn chassis to the pool.
    for terminal in terminals:
        yield port_chassis_container_terminals[terminal].put(abs(change))
        print(
            f"Terminal {terminal}: Number of chassis increased by {abs(change)} at time {env.now}")
This function reduces the chassis capacity in each terminal by change amount by removing chassis from the terminal's chassis store between time_start and time_end.
It also restores the chassis capacity after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- change (int): The number of chassis to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the chassis capacity will be reduced.
- time_start (int): The time at which the reduction starts.
- time_end (int): The time at which the reduction ends.
- port_chassis_container_terminals (dict): Dictionary containing chassis information for the terminals.
Returns:
None
def reduce_truck_gates(env, reduction, terminals, start_time, end_time, truck_gates):
    """
    Temporarily take truck gates out of service at the given terminals.

    Holds `abs(reduction)` gate requests per terminal between `start_time`
    and `end_time`, then releases them to restore full capacity.

    Fix: the previous `range(reduction)` iterated zero times for the
    documented negative `reduction`, silently skipping the disruption;
    the magnitude is used now.

    Args:
        env (simpy.Environment): The simulation environment.
        reduction (int): Number of truck gates to take offline (sign is ignored).
        terminals (list): Terminal indices affected by the disruption.
        start_time (int): Simulation time at which the reduction begins.
        end_time (int): Simulation time at which capacity is restored.
        truck_gates (dict): Maps terminal index -> truck gate resource.
    Returns:
        None (simpy process generator).
    """
    print(
        f"Modelling disruption: Attempting to remove {-reduction} truck gates from terminals {terminals} from time {start_time} to {end_time}")

    yield env.timeout(start_time)

    # Hold requests so the gates appear busy for the disruption window.
    requests = []
    for terminal in terminals:
        for _ in range(abs(reduction)):
            request = truck_gates[terminal].request()
            yield request  # Hold this request to reduce availability
            requests.append((terminal, request))
        print(
            f"{abs(reduction)} truck gates removed from terminal {terminal} at {start_time}")
    yield env.timeout(end_time - start_time)

    # Release the held requests to restore full capacity.
    for terminal, request in requests:
        truck_gates[terminal].release(request)
        print(
            f"{abs(reduction)} truck gates restored from terminal {terminal} at {end_time}")
This function reduces the truck gate capacity in each terminal by reduction amount by holding requests for truck gates in the terminal's truck gate store between start_time and end_time.
It also restores the truck gate capacity after the reduction period.
Arguments:
- env (simpy.Environment): The simulation environment.
- reduction (int): The number of truck gates to be removed (negative value) or added (positive value).
- terminals (list): List of terminal indices where the truck gate capacity will be reduced.
- start_time (int): The time at which the reduction starts.
- end_time (int): The time at which the reduction ends.
- truck_gates (dict): Dictionary containing truck gate information for the terminals.
Returns:
None
def adjust_arrival_rate(start_time, end_time, rate_parameter, run_id, plot_input=True):
    """
    Compress (or stretch) ship arrival times inside a time window.

    Arrivals in [start_time, end_time] are divided by `rate_parameter`
    (values > 1 pull ships earlier, i.e. a higher effective arrival rate).
    The adjusted schedule overwrites the run's ship_data.csv; the original
    times are preserved in an 'old_arrival' column.

    Args:
        start_time (float): Start of the window to adjust.
        end_time (float): End of the window to adjust.
        rate_parameter (float): Divisor applied to arrival times in range.
        run_id (str): The unique identifier for the simulation run.
        plot_input (bool): Plot arrival histograms before/after adjustment.
    Returns:
        tuple: (ship_data_df, ship_data) — the updated DataFrame (sorted by
        arrival, 'interarrival' column dropped) and its dict form
        (orient="index").
    """
    csv_path = f".{run_id}/logs/ship_data.csv"
    ships = pd.read_csv(csv_path)

    affected = (ships['arrival'] >= start_time) & (ships['arrival'] <= end_time)
    ships['old_arrival'] = ships['arrival']
    ships.loc[affected, 'arrival'] = ships.loc[affected, 'arrival'] / rate_parameter

    ship_data_df = ships.drop(columns=['interarrival']).sort_values('arrival')
    ship_data_df.to_csv(csv_path, index=False)
    ship_data = ship_data_df.to_dict(orient="index")

    if plot_input:
        fig, axes = plt.subplots(1, 2, figsize=(14, 6))
        panels = [('old_arrival', 'blue', 'Old Arrival Times'),
                  ('arrival', 'green', 'New Arrival Times')]
        for ax, (col, colour, title) in zip(axes, panels):
            ax.hist(ship_data_df[col], bins=30, color=colour, alpha=0.7)
            ax.set_xlabel('Arrival Time')
            ax.set_ylabel('Frequency')
            ax.set_title(title)

        # Share identical axis limits across both panels for comparability.
        x_lo = min(ship_data_df['old_arrival'].min(),
                   ship_data_df['arrival'].min())
        x_hi = max(ship_data_df['old_arrival'].max(),
                   ship_data_df['arrival'].max())
        y_hi = max(axes[0].get_ylim()[1], axes[1].get_ylim()[1])
        for ax in axes:
            ax.set_xlim(x_lo, x_hi)
            ax.set_ylim(0, y_hi)

        plt.tight_layout()
        plt.savefig(f".{run_id}/plots/scenarios/factor_arrival_scnario_input")
        plt.close()

    return ship_data_df, ship_data
This function adjusts the arrival times of ships in the ship data CSV file by a given rate parameter. It modifies the arrival times of ships that fall within the specified time range and saves the updated data back to the CSV file.
Arguments:
- start_time (float): The start time of the range to adjust arrival times.
- end_time (float): The end time of the range to adjust arrival times.
- rate_parameter (float): The factor by which to adjust the arrival times.
- run_id (str): The unique identifier for the simulation run.
- plot_input (bool): Whether to plot the input data before and after adjustment.
returns: ship_data_df (pd.DataFrame): The updated ship data DataFrame with adjusted arrival times. ship_data (dict): The updated ship data as a dictionary.
def stop_and_bulk_arrival(start_time, end_time, run_id, plot_input=False):
    """
    Halt ship arrivals during a disruption window and release them as a
    backlog afterwards.

    Every ship whose arrival falls in [start_time, end_time] is rescheduled
    to arrive shortly after `end_time`, with the backlog spread evenly over
    a 5-day recovery period. The updated schedule is written back to the
    run's ship_data.csv.

    Fix: when no ships fell inside the window the previous version divided
    by zero computing the gap; that case now leaves the schedule unchanged.

    Args:
        start_time (float): Start of the no-arrivals window.
        end_time (float): End of the no-arrivals window.
        run_id (str): The unique identifier for the simulation run.
        plot_input (bool): Plot arrival histograms before/after adjustment.
    Returns:
        tuple: (ship_data_df, ship_data_dict) — the updated DataFrame and
        its dict form (orient="index").
    """
    ship_data = pd.read_csv(f'.{run_id}/logs/ship_data.csv')
    ship_data['old_arrival'] = ship_data['arrival']
    in_time_range = (ship_data['arrival'] >= start_time) & (
        ship_data['arrival'] <= end_time)

    num_ships = int(in_time_range.sum())
    recovery_period = 24*5  # 5 days
    if num_ships > 0:
        gap = recovery_period/num_ships
        print(
            f"Modelling disruption: Stopping arrivals between {start_time} and {end_time} and bulk arrivals after {end_time} with a gap of {gap} hours")
        # Reschedule the held ships to arrive just after the window, evenly spaced.
        bulk_arrival_time = end_time + 0.1
        ship_data.loc[in_time_range, 'arrival'] = [
            bulk_arrival_time + i * gap for i in range(num_ships)]
    else:
        # Guard against ZeroDivisionError: nothing to reschedule.
        print(
            f"Modelling disruption: No arrivals found between {start_time} and {end_time}; schedule left unchanged")

    if 'interarrival' in ship_data.columns:
        ship_data = ship_data.drop(columns=['interarrival'])
    ship_data_df = ship_data.sort_values('arrival')
    output_path = f'.{run_id}/logs/ship_data.csv'
    ship_data_df.to_csv(output_path, index=False)
    ship_data_dict = ship_data_df.to_dict(orient="index")

    if plot_input:
        fig, axes = plt.subplots(1, 2, figsize=(14, 6))
        axes[0].hist(ship_data_df['old_arrival'],
                     bins=30, color='blue', alpha=0.7)
        axes[0].set_xlabel('Arrival Time')
        axes[0].set_ylabel('Frequency')
        axes[0].set_title('Old Arrival Times')
        axes[1].hist(ship_data_df['arrival'], bins=30,
                     color='green', alpha=0.7)
        axes[1].set_xlabel('Arrival Time')
        axes[1].set_ylabel('Frequency')
        axes[1].set_title('New Arrival Times')
        min_x = min(ship_data_df['old_arrival'].min(),
                    ship_data_df['arrival'].min())
        max_x = max(ship_data_df['old_arrival'].max(),
                    ship_data_df['arrival'].max())
        min_y = 0
        max_y = max(axes[0].get_ylim()[1], axes[1].get_ylim()[1])

        for ax in axes:
            ax.set_xlim(min_x, max_x)
            ax.set_ylim(min_y, max_y)

        plt.tight_layout()
        plt.savefig(f".{run_id}/plots/scenarios/bulk_arrival_scenario_input")
        plt.close()

    return ship_data_df, ship_data_dict
This function modifies the ship arrival times in the ship data CSV file by stopping arrivals between start_time and end_time.
It also simulates a bulk arrival scenario after end_time by distributing the arrivals evenly over a recovery period.
Arguments:
- start_time (float): The start time of the range to stop arrivals.
- end_time (float): The end time of the range to stop arrivals.
- run_id (str): The unique identifier for the simulation run.
- plot_input (bool): Whether to plot the input data before and after adjustment.
returns: ship_data_df (pd.DataFrame): The updated ship data DataFrame with adjusted arrival times. ship_data_dict (dict): The updated ship data as a dictionary.
def filter_data_by_arrival_rate(data, time_arrival_dict, time_column):
    """
    Filter data based on arrival rate factors for different time periods.

    For each (start, end) window, keeps the given fraction of rows whose
    `time_column` value falls in [start, end) and drops the rest at
    equispaced positions, preserving the temporal distribution.

    Parameters
    ----------
    data : pd.DataFrame
        The dataframe to filter (truck_data or train_data).
    time_arrival_dict : dict
        Dictionary with (start_time, end_time): arrival_rate_factor,
        e.g. {(6.0, 10.0): 0.8} — a factor of 0.8 keeps 80% of the rows.
    time_column : str
        Name of the time column to use for filtering
        ('arrival_time' for trucks, 'arrival at' for trains).

    Returns
    -------
    pd.DataFrame
        Filtered dataframe with the index reset.
    """
    result = data.copy()

    # One boolean per row; flipped to False for rows scheduled for removal.
    keep = pd.Series(True, index=result.index)

    for (window_start, window_end), keep_fraction in time_arrival_dict.items():
        window_mask = (result[time_column] >= window_start) & (result[time_column] < window_end)
        window_labels = result.index[window_mask].tolist()
        window_size = len(window_labels)
        if window_size == 0:
            continue

        drop_count = int(window_size * (1 - keep_fraction))
        if drop_count <= 0:
            continue

        # Drop rows at equispaced positions within the window.
        stride = window_size / drop_count
        drop_labels = [window_labels[int(k * stride)]
                       for k in range(drop_count)
                       if int(k * stride) < window_size]
        keep[drop_labels] = False

    return result[keep].reset_index(drop=True)
Filter data based on arrival rate factors for different time periods.
Parameters:
data : pd.DataFrame The dataframe to filter (truck_data or train_data) time_arrival_dict : dict Dictionary with (start_time, end_time): arrival_rate_factor e.g., {(6.0, 10.0): 0.8, (10.0, 15.0): 0.6} arrival_rate_factor of 0.8 means keep 80% of data (remove 20%) time_column : str Name of the time column to use for filtering ('arrival_time' for trucks, 'arrival at' for trains)
Returns:
pd.DataFrame Filtered dataframe with specified removal rates applied
def filter_truck_data(run_id, time_arrival_dict):
    """
    Filter truck data based on arrival rate factors.

    Loads the pickled truck data for `run_id`, thins it with
    `filter_data_by_arrival_rate` on the 'arrival' column, and writes the
    filtered frame back to the same pickle so downstream consumers see the
    reduced demand.

    Fix: removed a leftover debug `print(truck_data.head())`.

    Parameters
    ----------
    run_id : str
        Unique identifier for the simulation run.
    time_arrival_dict : dict
        Dictionary with (start_time, end_time): arrival_rate_factor.

    Returns
    -------
    pd.DataFrame
        Filtered truck dataframe.
    """
    truck_data_path = f'.{run_id}/logs/truck_data.pkl'
    truck_data = pd.read_pickle(truck_data_path)
    filtered_truck_data = filter_data_by_arrival_rate(truck_data, time_arrival_dict, 'arrival')
    filtered_truck_data.to_pickle(truck_data_path)
    return filtered_truck_data
Filter truck data based on arrival rate factors.
Parameters:
run_id : str Unique identifier for the simulation run time_arrival_dict : dict Dictionary with (start_time, end_time): arrival_rate_factor
Returns:
pd.DataFrame Filtered truck dataframe
def filter_train_data(run_id, time_arrival_dict):
    """
    Filter train data based on arrival rate factors.

    Loads the train schedule CSV for `run_id`, thins it with
    `filter_data_by_arrival_rate` on the 'arrival at' column, and writes
    the filtered frame back to the same CSV.

    Parameters
    ----------
    run_id : str
        Unique identifier for the simulation run.
    time_arrival_dict : dict
        Dictionary with (start_time, end_time): arrival_rate_factor.

    Returns
    -------
    pd.DataFrame
        Filtered train dataframe.
    """
    data_path = f'.{run_id}/logs/train_data.csv'
    trains = pd.read_csv(data_path)
    filtered_trains = filter_data_by_arrival_rate(trains, time_arrival_dict, 'arrival at')
    filtered_trains.to_csv(data_path, index=False)
    return filtered_trains
Filter train data based on arrival rate factors.
Parameters:
run_id : str Unique identifier for the simulation run time_arrival_dict : dict Dictionary with (start_time, end_time): arrival_rate_factor
Returns:
pd.DataFrame Filtered train dataframe
def model_hurricane(env, terminal_resouces, num_terminals_list, terminal_data, run_id, seed):
    """
    Schedule disruption processes for a hurricane or tropical-storm scenario.

    Exactly one of the SEVERE_HURRICANE / TROPICAL_STORM config flags should
    be True; if both are set an error is printed and nothing is scheduled.

    Severe hurricane:
    - Ship arrivals stop for 6 days from HURRICANE_START and return as a
      bulk backlog (stop_and_bulk_arrival).
    - All container terminals lose one crane per berth in an affected
      fraction of berths: 30% during weeks 1-2, 20% in week 3, 10% in week 4.
    - A random 10% of liquid terminals lose one pipeline and a random 50% of
      dry bulk terminals lose one conveyor on the same weekly schedule.
    - Truck arrivals are thinned (0% during the storm, then 10%/30%/50% of
      normal in the following weeks); trains stay out through week 2 and
      return at 50% in week 3.

    Tropical storm:
    - No facility damage; ship arrivals stop for 4 days, trucks run at 50%
      for week 1 and 80% for week 2, trains are unchanged.

    Args:
        env (simpy.Environment): The simulation environment.
        terminal_resouces (tuple): Resources for the port terminals, in the
            fixed 19-element order unpacked below.
        num_terminals_list (list): [num_container, num_liquid, num_drybulk].
        terminal_data (dict): Terminal data used for equipment transfer rates.
        run_id (str): The unique identifier for the simulation run.
        seed (int): The random seed for reproducibility.

    Returns:
        None
    """

    # Seed so the random choice of affected terminals is reproducible per run.
    random.seed(seed)

    port_berths_container_terminals, port_yard_container_terminals, port_berth_liquid_terminals, port_tanks_liquid_terminals, \
        port_berth_drybulk_terminals, port_silos_drybulk_terminals, port_loading_bays_liquid_terminals, port_drybulk_bays_drybulk_terminals, \
        port_chassis_container_terminals, truck_gates_ctr, truck_gates_liquid, truck_gates_dk, train_loading_racks_ctr, train_loading_racks_liquid, \
        train_loading_racks_dk, day_pilots, night_pilots, tugboats, channel_scheduler = terminal_resouces

    num_container_terminals, num_liquid_terminals, num_drybulk_terminals = num_terminals_list

    if SEVERE_HURRICANE == True and TROPICAL_STORM == False:

        # No vessel arrivals for 6 days from the hurricane onset; the backlog
        # then arrives in bulk shortly after the window closes.
        time_start = HURRICANE_START
        disruption_duration = 24*6

        stop_and_bulk_arrival(time_start, time_start + disruption_duration, run_id, plot_input=True)

        # Recovery milestones (hours after onset).
        time_end_1_week = time_start + 24*7  # 7 days
        time_end_2_week = time_start + 24*7 + 24*7  # 14 days
        time_end_3_weeks = time_start + 24*7 + 24*14  # 21 days
        time_end_4_weeks = time_start + 24*7 + 24*21  # 28 days

        # Week 1: every container terminal loses one crane in 30% of berths;
        # a random 10% of liquid terminals and 50% of dry bulk terminals lose
        # one pipeline / conveyor in 30% of berths.
        env.process(reduce_cranes(env, -1, list(range(num_container_terminals)),
                                  time_start, time_end_1_week, port_berths_container_terminals, terminal_data, 0.3))
        selected_liquid_terminals = random.sample(
            range(num_liquid_terminals), int(0.1 * num_liquid_terminals))
        selected_drybulk_terminals = random.sample(
            range(num_drybulk_terminals), int(0.5 * num_drybulk_terminals))
        env.process(reduce_pipelines(env, -1, selected_liquid_terminals,
                                     time_start, time_end_1_week, port_berth_liquid_terminals, terminal_data, 0.3))
        env.process(reduce_conveyors(env, -1, selected_drybulk_terminals,
                                     time_start, time_end_1_week, port_berth_drybulk_terminals, terminal_data, 0.3))

        # Week 2: same outage levels (30% of berths) for the same terminals.
        env.process(reduce_cranes(env, -1, list(range(num_container_terminals)),
                                  time_end_1_week, time_end_2_week, port_berths_container_terminals, terminal_data, 0.30))
        env.process(reduce_pipelines(env, -1, selected_liquid_terminals,
                                     time_end_1_week, time_end_2_week, port_berth_liquid_terminals, terminal_data, 0.30))
        env.process(reduce_conveyors(env, -1, selected_drybulk_terminals,
                                     time_end_1_week, time_end_2_week, port_berth_drybulk_terminals, terminal_data, 0.30))

        # Week 3: outages ease to 20% of berths.
        env.process(reduce_cranes(env, -1, list(range(num_container_terminals)),
                                  time_end_2_week, time_end_3_weeks, port_berths_container_terminals, terminal_data, 0.2))
        env.process(reduce_pipelines(env, -1, selected_liquid_terminals,
                                     time_end_2_week, time_end_3_weeks, port_berth_liquid_terminals, terminal_data, 0.2))
        env.process(reduce_conveyors(env, -1, selected_drybulk_terminals,
                                     time_end_2_week, time_end_3_weeks, port_berth_drybulk_terminals, terminal_data, 0.2))

        # Week 4: outages ease further to 10% of berths.
        env.process(reduce_cranes(env, -1, list(range(num_container_terminals)),
                                  time_end_3_weeks, time_end_4_weeks, port_berths_container_terminals, terminal_data, 0.1))
        env.process(reduce_pipelines(env, -1, selected_liquid_terminals,
                                     time_end_3_weeks, time_end_4_weeks, port_berth_liquid_terminals, terminal_data, 0.1))
        env.process(reduce_conveyors(env, -1, selected_drybulk_terminals,
                                     time_end_3_weeks, time_end_4_weeks, port_berth_drybulk_terminals, terminal_data, 0.1))

        # Landside demand: no trucks during the storm, then a gradual return
        # (10% -> 30% -> 50% of normal over the recovery weeks).
        truck_time_arrival_dict = {
            (time_start, time_start + disruption_duration): 0.0,  # no trucks during hurricane
            (time_end_1_week, time_end_2_week): 0.1,
            (time_end_2_week, time_end_3_weeks): 0.3,
            (time_end_3_weeks, time_end_4_weeks): 0.5
        }

        # Trains stay out through week 2, then return at 50% in week 3.
        train_time_arrival_dict = {
            (time_start, time_start + disruption_duration): 0.0,  # no trains during hurricane
            (time_end_1_week, time_end_2_week): 0.0,
            (time_end_2_week, time_end_3_weeks): 0.5
        }

        filter_truck_data(run_id, truck_time_arrival_dict)
        filter_train_data(run_id, train_time_arrival_dict)

    elif TROPICAL_STORM == True and SEVERE_HURRICANE == False:

        time_start = HURRICANE_START
        disruption_duration = 24*4  # 4 days

        time_end_1_week = HURRICANE_START + 24*7  # 7 days
        time_end_2_week = HURRICANE_START + 24*14  # 14 days

        # No damage to port facilities.

        # No vessel arrivals for 4 days; backlog arrives in bulk afterwards.
        stop_and_bulk_arrival(time_start, time_start + disruption_duration, run_id, plot_input=True)

        # Trucks run at 50% for week 1 and 80% for week 2; trains unchanged.
        truck_time_arrival_dict = {
            (time_start, time_end_1_week): 0.5,
            (time_end_1_week, time_end_2_week): 0.8
        }

        filter_truck_data(run_id, truck_time_arrival_dict)

    elif SEVERE_HURRICANE == True and TROPICAL_STORM == True:
        print("Error: Both SEVERE_HURRICANE and TROPICAL_STORM flags cannot be True at the same time; update the config file.")
This function models a hurricane scenario by reducing the number of berths, cranes, pipelines, and conveyors in the port terminals. It simulates the disruption by adjusting the resources available in the terminals between specified time periods. Specifically, the following changes are made:
- Reduces the number of berths in all container terminals by 1 from time 1165 to 2004.
- Reduces the number of berths in 10% of liquid and dry bulk terminals by 1 from time 1165 to 2004.
- Reduces the number of cranes in all container terminals by 1 from time 1165 to 2340.
- Reduces the number of pipelines in 25% of liquid terminals by 1 from time 1165 to 2340.
- Reduces the number of conveyors in 25% of dry bulk terminals by 1 from time 1165 to 2340.
Arguments:
- env (simpy.Environment): The simulation environment.
- terminal_resouces (tuple): A tuple containing the resources for the port terminals.
- num_terminals_list (list): A list containing the number of container, liquid, and dry bulk terminals.
- terminal_data (dict): A dictionary containing terminal data for crane transfer rates.
- run_id (str): The unique identifier for the simulation run.
- seed (int): The random seed for reproducibility.
Returns: None
def disruption_report_smoothed(smooth_window_points: int = 30, plot_legend: bool = False) -> tuple[str, str]:
    """
    Build a hurricane-disruption report from the collated queue log.

    Reads ``collatedResults/<run>/data/all_queue.csv``, averages all run columns
    into a single mean queue series, then identifies five events on the RAW
    series — onset, queue-in point, mobilisation, peak, recovery — and computes
    durations between them, baseline-relative areas (on the SMOOTHED series),
    and resilience metrics (robustness, mobilization time, peak queue increase,
    max recovery rate). Saves a PDF plot and a text report, printing each
    report line as it is written.

    Args:
        smooth_window_points: Window size for centered rolling-mean smoothing
            of the raw queue. Coerced to an odd integer >= 3. Smoothing is used
            only for the area computations and the plotted smooth curve.
        plot_legend: If True, draw the legend on the PDF plot.

    Returns:
        tuple[str, str]: (report text joined with newlines, path of the saved
        PDF plot).

    Note: All event times are identified on RAW data, but areas are computed
    using SMOOTHED data.
    """
    warmup_time = WARMUP_ITERS
    onset_time = HURRICANE_START

    # Queue log collated across runs; first column is time, remaining columns
    # are per-run queue lengths.
    logfileid = f"{int(NUM_MONTHS)}_months_{NUM_RUNS}_runs_run_{LOG_NUM}"
    df_location = f'collatedResults/{logfileid}/data/all_queue.csv'

    df_raw = pd.read_csv(df_location)
    df_raw = df_raw.rename(columns={df_raw.columns[0]: "time"})
    # Mean queue across all runs (every column except time).
    df_raw["mean"] = df_raw.iloc[:, 1:].mean(axis=1)

    time_col = "time"
    queue_col = "mean"
    df = df_raw[[time_col, queue_col]].rename(columns={time_col: "time_hr", queue_col: "q_raw"})

    # Coerce to numeric and drop unparseable rows so index-based lookups below
    # (df.at[i, ...]) are safe on a clean, 0..N-1 index.
    df = df.sort_values("time_hr").reset_index(drop=True)
    df["time_hr"] = pd.to_numeric(df["time_hr"], errors="coerce")
    df["q_raw"] = pd.to_numeric(df["q_raw"], errors="coerce")
    df = df.dropna(subset=["time_hr", "q_raw"]).reset_index(drop=True)

    # Smoothing (areas and plotting only). Force the window to an odd
    # integer >= 3 so the rolling mean stays centered.
    w = int(smooth_window_points)
    if w < 3:
        w = 3
    if w % 2 == 0:
        w += 1
    df["q_smooth"] = df["q_raw"].rolling(window=w, center=True, min_periods=1).mean()

    # Baseline: mean RAW queue over [warmup_time, onset_time], i.e. the steady
    # pre-disruption period after warmup.
    t0, t1 = warmup_time, onset_time
    base_mask = (df["time_hr"] >= t0) & (df["time_hr"] <= t1)
    baseline = float(df.loc[base_mask, "q_raw"].mean())

    # A: onset — sample closest in time to the configured hurricane start.
    idx_A = int((df["time_hr"] - onset_time).abs().idxmin())
    A_t = float(df.at[idx_A, "time_hr"])
    A_q = float(df.at[idx_A, "q_raw"])

    # Queue-in point: first sample after onset with two consecutive positive
    # raw slopes (queue has started growing in earnest).
    dQdt = df["q_raw"].diff() / df["time_hr"].diff()
    idx_queue_in = None
    for i in range(idx_A + 1, len(df) - 2):
        if (dQdt.iloc[i] > 0) and (dQdt.iloc[i + 1] > 0):
            idx_queue_in = i
            break
    if idx_queue_in is None:
        idx_queue_in = min(idx_A + 1, len(df) - 1)
    t_queue_in = float(df.at[idx_queue_in, "time_hr"])
    q_queue_in = float(df.at[idx_queue_in, "q_raw"])

    # B: mobilisation — first upward crossing of the baseline after the
    # queue-in point.
    idx_B = None
    for i in range(idx_queue_in, len(df)):
        if df.at[i, "q_raw"] >= baseline:
            idx_B = i
            break
    if idx_B is None:
        idx_B = len(df) - 1
    B_t = float(df.at[idx_B, "time_hr"])
    B_q = baseline

    # C: peak — raw maximum at or after mobilisation.
    idx_C = int(df.loc[df["time_hr"] >= B_t, "q_raw"].idxmax())
    C_t = float(df.at[idx_C, "time_hr"])
    C_q = float(df.at[idx_C, "q_raw"])

    # D: recovery — first point after the peak where the average raw queue
    # over the following 2 weeks is at or below baseline. (An earlier
    # baseline-crossing-with-interpolation criterion was replaced by this
    # windowed-average test.)
    two_weeks_in_hours = 14 * 24
    D_t, D_q = None, None
    for i in range(idx_C, len(df)):
        # End of the 2-week window starting at sample i.
        end_time = df.at[i, "time_hr"] + two_weeks_in_hours

        # Subset of data inside the 2-week window.
        window_mask = (df["time_hr"] >= df.at[i, "time_hr"]) & (df["time_hr"] <= end_time)
        window_data = df.loc[window_mask, "q_raw"]

        # Check the window average as long as at least one data point exists
        # (near the end of the series the window may be truncated).
        if len(window_data) >= 1:
            avg_queue = window_data.mean()
            if avg_queue <= baseline:
                D_t = float(df.at[i, "time_hr"])
                D_q = float(df.at[i, "q_raw"])
                break

    # Fallback: if the queue never settles back, use the final sample.
    if D_t is None:
        D_t = float(df.at[len(df) - 1, "time_hr"])
        D_q = float(df.at[len(df) - 1, "q_raw"])

    # Durations between consecutive events (hours).
    dur_A_queue_in = t_queue_in - A_t
    dur_queue_in_B = B_t - t_queue_in
    dur_B_C = C_t - B_t
    dur_C_D = D_t - C_t

    # Areas between A and D using the SMOOTHED series. If the window is empty
    # or stops short of D, append the (D_t, baseline) endpoint so the
    # trapezoid integration spans the full interval.
    mask_AD = (df["time_hr"] >= A_t) & (df["time_hr"] <= D_t)
    xs = df.loc[mask_AD, "time_hr"].to_numpy(dtype=float)
    ys_sm = df.loc[mask_AD, "q_smooth"].to_numpy(dtype=float)
    if xs.size == 0 or xs[-1] < D_t:
        xs = np.append(xs, D_t)
        ys_sm = np.append(ys_sm, baseline)
    area_pos = float(np.trapz(np.maximum(0.0, ys_sm - baseline), xs))
    area_neg = float(np.trapz(np.maximum(0.0, baseline - ys_sm), xs))

    # Resilience metrics.
    # Robustness (R): ratio of the minimum raw queue between onset and the
    # queue-in point to the baseline.
    idx_min = int(df.loc[(df["time_hr"] >= A_t) & (df["time_hr"] <= t_queue_in), "q_raw"].idxmin())
    q_min = df.at[idx_min, "q_raw"]
    robustness = q_min / baseline

    # Mobilization (M): time from onset to mobilisation (hours).
    mobilization = B_t - A_t

    # Peak Queue Increase (P): relative rise of the peak above baseline.
    peak_increase = (C_q - baseline) / baseline

    # Max Recovery Rate (m_max): steepest (most negative) secant slope from
    # the peak to a point 5..50 samples ahead. Track the best lookahead so the
    # plot can draw the matching segment.
    max_negative_rate = 0.0
    best_lookahead = 0
    best_lookahead_idx = idx_C

    for recovery_rate_lookahead in range(5, 51):
        idx_C_plus_n = min(idx_C + recovery_rate_lookahead, len(df) - 1)
        if idx_C_plus_n > idx_C:
            rate = (df.at[idx_C_plus_n, "q_raw"] - C_q) / (df.at[idx_C_plus_n, "time_hr"] - C_t)
        else:
            rate = 0.0

        # Keep the most negative (fastest-recovering) slope seen so far.
        if rate < max_negative_rate:
            max_negative_rate = rate
            best_lookahead = recovery_rate_lookahead
            best_lookahead_idx = idx_C_plus_n

    # Drop the last 200 points to avoid edge effects in smoothing (plot only;
    # all metrics above were computed on the full series).
    df = df.iloc[:-200, :].reset_index(drop=True)

    # Plot (PDF): raw & smoothed curves; shading uses the smoothed series.
    logfileid = f"{int(NUM_MONTHS)}_months_{NUM_RUNS}_runs_run_{LOG_NUM}"
    pdf_path = f'collatedResults/{logfileid}/hurricane_report.pdf'
    plt.figure(figsize=(16, 6.5))
    plt.plot(df["time_hr"], df["q_raw"], label="Queue (raw)", alpha=0.5)
    plt.plot(df["time_hr"], df["q_smooth"], label=f"Queue (smoothed)", linewidth=2)
    plt.axhline(baseline, linestyle="--", label="Baseline")
    plt.fill_between(xs, ys_sm, baseline, where=(ys_sm < baseline), alpha=0.3, label="Lost service", color="orange")
    plt.fill_between(xs, ys_sm, baseline, where=(ys_sm > baseline), alpha=0.2, label="Excess queuing", color="pink")

    # Shade the warmup period.
    plt.axvspan(0, warmup_time, color='gray', alpha=0.2, label="Warmup period")

    # Max recovery rate line segment (peak to the best lookahead point).
    x_rate = [C_t, df.at[best_lookahead_idx, "time_hr"]]
    y_rate = [C_q, df.at[best_lookahead_idx, "q_raw"]]
    plt.plot(x_rate, y_rate, 'g--', label="Max recovery rate", linewidth=2)

    for (xt, yv), lbl in [((A_t, A_q), "Onset"), ((B_t, B_q), "Mobilisation"),
                          ((C_t, C_q), "Peak"), ((D_t, D_q), "Recovery")]:
        plt.scatter([xt], [yv], s=64, label=lbl)
    plt.xlabel("Time (hr)", fontsize=16)
    plt.ylabel("Queue length", fontsize=16)
    plt.tight_layout()
    if plot_legend:
        plt.legend(fontsize=14)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.savefig(pdf_path, dpi=200, bbox_inches="tight")
    plt.show()

    lines = [
        f"onset: {A_t:.3f}",
        f"queue-in-point: {t_queue_in:.3f}",
        f"mobilisation: {B_t:.3f}",
        f"peak: {C_t:.3f}",
        f"recovery: {D_t:.3f}",
        "",
        f"time onset to queue-in-point (days): {dur_A_queue_in / 24:.1f}",
        f"time queue-in-point to mobilisation (days): {dur_queue_in_B / 24:.1f}",
        f"time mobilisation to peak (days): {dur_B_C / 24:.1f}",
        f"time peak to recovery (days): {dur_C_D / 24:.1f}",
        "",
        f"area above baseline (vessel days): {area_pos / 24:.2f}",
        f"area below baseline (vessel days): {area_neg / 24:.2f}",
        "",
        f"Robustness: {robustness:.2f}",
        f"Mobilization: {mobilization:.2f}",
        f"Peak Queue Increase: {peak_increase:.2f}",
        f"Max Recovery Rate: {max_negative_rate:.2f}"
    ]

    # Persist the report, echoing each line to stdout as it is written.
    with open(f'collatedResults/{logfileid}/hurricane_report.txt', 'w') as f:
        for line in lines:
            f.write(line + "\n")
            print(line)

    return "\n".join(lines), str(pdf_path)
Inputs:
- smooth_window_points: window size (odd integer) for rolling mean smoothing of raw queue
Outputs (returned as a formatted string, together with the path of the generated PDF plot): 1) times: onset, queue_in_point, mobilisation, peak, recovery 2) a blank line 3) durations: onset→queue_in_point, queue_in_point→mobilisation, mobilisation→peak, peak→recovery 4) a blank line 5) areas: positive (above baseline), negative (below baseline) Note: All event times are identified on RAW data, but areas are computed using SMOOTHED data.
def fog_peak_slopes(slope_steps: int = 5, baseline_end: float = 2*7*14) -> tuple[str, str]:
    """
    Analyse queue peaks during fog-related port closures and plot the results.

    - Shades inbound/outbound closure windows.
    - For each pair i, finds the raw peak within the union window
      [min(in_i, out_i), max(in_i, out_i)].
    - Computes slope from peak -> next `slope_steps` points:
      (q[t+k]-q[t])/(time[t+k]-time[t]). Pairs whose queue does not decrease
      over that span are skipped.
    - Produces TWO plots:
        1) Full view: queue with inbound/outbound shaded (no slope segments).
        2) Zoomed to [first_closure_start, last_closure_end]: queue with
           closures shaded AND slope segments.
    - Prints a compact table (no CSV) with: i, peak_time, peak_queue,
      slope_peak_to_nextK, and also writes it to fog_report.txt.

    Args:
        slope_steps: Number of samples ahead of each peak used for the slope.
        baseline_end: Upper time bound (hours) of the baseline-averaging
            window.
            NOTE(review): default is 2*7*14 = 196 hr; if "two weeks in hours"
            was intended this should be 2*7*24 = 336 — confirm.

    Returns:
        tuple[str, str]: (path of the full-view PDF, path of the zoomed PDF).

    Uses default Matplotlib colors; markers are circles; no text annotations.
    """
    # ---- Load data ----
    warmup_time = WARMUP_ITERS
    inbound_closed = INBOUND_CLOSED      # list of (start, end) closure windows
    outbound_closed = OUTBOUND_CLOSED    # list of (start, end) closure windows

    logfileid = f"{int(NUM_MONTHS)}_months_{NUM_RUNS}_runs_run_{LOG_NUM}"
    df_location = f'collatedResults/{logfileid}/data/all_queue.csv'

    df_raw = pd.read_csv(df_location)
    df_raw = df_raw.rename(columns={df_raw.columns[0]: "time"})
    # Mean queue across all runs (every column except time).
    df_raw["mean"] = df_raw.iloc[:, 1:].mean(axis=1)

    time_col = "time"
    mean_col = "mean"
    df = df_raw[[time_col, mean_col]].rename(columns={time_col: "time_hr", mean_col: "q_raw"})

    # Coerce to numeric and drop unparseable rows so positional lookups below
    # (df.at[i, ...]) work on a clean, 0..N-1 index.
    df = df.sort_values("time_hr").reset_index(drop=True)
    df["time_hr"] = pd.to_numeric(df["time_hr"], errors="coerce")
    df["q_raw"] = pd.to_numeric(df["q_raw"], errors="coerce")
    df = df.dropna(subset=["time_hr", "q_raw"]).reset_index(drop=True)

    # Baseline: mean RAW queue over [warmup_time, baseline_end].
    base_mask = (df["time_hr"] >= warmup_time) & (df["time_hr"] <= baseline_end)
    baseline = float(df.loc[base_mask, "q_raw"].mean())

    # ---- Per-pair peak and slope to next K steps ----
    results = []
    # Pair inbound/outbound windows index-wise; extra unmatched windows on
    # either list are ignored.
    n = min(len(inbound_closed), len(outbound_closed))
    for i in range(n):
        a0, a1 = inbound_closed[i]
        b0, b1 = outbound_closed[i]
        win_start = min(a0, b0)
        win_end = max(a1, b1)
        mask_win = (df["time_hr"] >= win_start) & (df["time_hr"] <= win_end)
        if mask_win.sum() == 0:
            continue
        idx_peak = int(df.loc[mask_win, "q_raw"].idxmax())
        t_peak = float(df.at[idx_peak, "time_hr"])
        q_peak = float(df.at[idx_peak, "q_raw"])

        # Look `slope_steps` samples past the peak (clamped to the series end).
        idx_k = min(idx_peak + max(1, int(slope_steps)), len(df)-1)
        t_k = float(df.at[idx_k, "time_hr"])
        q_k = float(df.at[idx_k, "q_raw"])

        # Only record the slope if the queue actually decreases after the peak.
        if q_k <= q_peak:
            slope = (q_k - q_peak)/(t_k - t_peak) if (t_k - t_peak) != 0 else np.nan
            results.append({
                "i": i+1,
                "peak_time": t_peak,
                "peak_queue": q_peak,
                "nextK_time": t_k,
                "nextK_queue": q_k,
                "slope_peak_to_nextK": slope
            })

    # ---- PLOTS ----
    # 1) Full view: queue with closure windows shaded, no slope segments.
    pdf_full = f'collatedResults/{logfileid}/fog_report_full.pdf'
    export_prefix = f'collatedResults/{logfileid}/fog_report'

    plt.figure(figsize=(14, 7))
    plt.plot(df["time_hr"], df["q_raw"], label="Queue")
    plt.axhline(baseline, linestyle="--", label="Baseline")
    # Label each closure type only once (on its first window) to keep the
    # legend compact.
    for (s, e) in inbound_closed:
        plt.axvspan(s, e, alpha=0.18, color='green', label="Inbound closed" if (s, e) == inbound_closed[0] else None)
    for (s, e) in outbound_closed:
        plt.axvspan(s, e, alpha=0.18, color='orange', label="Outbound closed" if (s, e) == outbound_closed[0] else None)
    plt.xlabel("Time (hr)", fontsize=16)
    plt.ylabel("Queue length", fontsize=16)
    plt.legend(loc="best", fontsize=16)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.tight_layout()
    plt.savefig(pdf_full, dpi=200, bbox_inches="tight")
    plt.show()
    plt.close()

    # 2) Zoomed view: restricted to the span of all closures, with slope
    #    segments drawn from each recorded peak.
    pdf_zoom = Path(f"{export_prefix}_zoom.pdf")
    fog_start = min(min(x[0] for x in inbound_closed), min(x[0] for x in outbound_closed))
    fog_end = max(max(x[1] for x in inbound_closed), max(x[1] for x in outbound_closed))
    m = (df["time_hr"] >= fog_start) & (df["time_hr"] <= fog_end)
    plt.figure(figsize=(14, 7))
    plt.plot(df.loc[m, "time_hr"], df.loc[m, "q_raw"], label="Queue")
    plt.axhline(baseline, linestyle="--", label="Baseline")
    for (s, e) in inbound_closed:
        if e >= fog_start and s <= fog_end:
            plt.axvspan(s, e, alpha=0.18, color='green', label="Inbound closed" if (s, e) == inbound_closed[0] else None)
    for (s, e) in outbound_closed:
        if e >= fog_start and s <= fog_end:
            plt.axvspan(s, e, alpha=0.18, color='orange', label="Outbound closed" if (s, e) == outbound_closed[0] else None)
    # Circle markers at each peak + dashed slope segments to the next-K point.
    for r in results:
        if fog_start <= r["peak_time"] <= fog_end:
            plt.plot([r["peak_time"]], [r["peak_queue"]], marker='o', linestyle='None', color='green', markersize=8)
            plt.plot([r["peak_time"], r["nextK_time"]],
                     [r["peak_queue"], r["nextK_queue"]],
                     linestyle='--', linewidth=3, color='k')
    plt.xlabel("Time (hr)", fontsize=16)
    plt.ylabel("Queue length", fontsize=16)
    plt.legend(
        loc="upper center",          # anchor relative to top center
        bbox_to_anchor=(0.5, -0.1),  # shift it below the axes
        fontsize=16,
        ncol=4                       # spread entries into multiple columns
    )
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.tight_layout()
    plt.savefig(pdf_zoom, dpi=200, bbox_inches="tight")
    plt.show()
    plt.close()

    # ---- PRINT TABLE ----
    print("i, peak_time, peak_queue, slope_peak_to_nextK")
    for r in results:
        print(f"{r['i']}, {r['peak_time']:.3f}, {r['peak_queue']:.3f}, {r['slope_peak_to_nextK']:.6f}")
    print("\nAverage slope is", np.mean([r["slope_peak_to_nextK"] for r in results]))

    # Save the same table as a text file alongside the plots.
    with open(f'collatedResults/{logfileid}/fog_report.txt', 'w') as f:
        f.write("i, peak_time, peak_queue, slope_peak_to_nextK\n")
        for r in results:
            f.write(f"{r['i']}, {r['peak_time']:.3f}, {r['peak_queue']:.3f}, {r['slope_peak_to_nextK']:.6f}\n")

    return str(pdf_full), str(pdf_zoom)
- Shades inbound/outbound closure windows.
- For each pair i, finds the raw peak within the union window [min(in_i, out_i), max(in_i, out_i)].
- Computes slope from peak -> next `slope_steps` points: (q[t+k]-q[t])/(time[t+k]-time[t]).
- Produces TWO plots: 1) Full view: queue with inbound/outbound shaded (no slope segments). 2) Zoomed to [first_closure_start, last_closure_end]: queue with closures shaded AND slope segments.
- Prints a compact table (no CSV) with: i, peak_time, peak_queue, slope_peak_to_nextK.
Uses default Matplotlib colors; markers are circles; no text annotations.