diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000..4e44082 --- /dev/null +++ b/environment.yml @@ -0,0 +1,16 @@ +name: floodplains_muskingum +channels: + - conda-forge +dependencies: + - python + - matplotlib + - ca-certificates + - openssl + - scipy + - certifi + - anaconda::cython + - pandas + - anaconda::pillow + - statsmodels + - anaconda::seaborn +prefix: /users/k/l/klawson1/.conda/envs/floodplains_muskingum diff --git a/samples/CIROH/muskingum_cunge.py b/samples/CIROH/muskingum_cunge.py index ba369da..32b0d52 100644 --- a/samples/CIROH/muskingum_cunge.py +++ b/samples/CIROH/muskingum_cunge.py @@ -7,6 +7,7 @@ from scipy.ndimage import gaussian_filter1d from statsmodels.nonparametric.smoothers_lowess import lowess import time +import sys ### Static Data ### @@ -235,7 +236,7 @@ def execute(meta_path, debug_plots=False): outflows = inflows.copy() for iter in range(subreaches): try: - outflows = mc_reach.route_hydrograph_c(outflows, dt) + outflows = mc_reach.route_hydrograph(outflows, (dt * 60 * 60), lateral=None, initial_outflow=None, short_ts=False, solver='fread-c') except AssertionError: outflows = np.repeat(np.nan, outflows.shape[0]) break @@ -307,5 +308,5 @@ def execute(meta_path, debug_plots=False): out_data.to_csv(run_dict['muskingum_path']) if __name__ == '__main__': - run_path = r"/users/k/l/klawson1/netfiles/ciroh/floodplainsData/runs/9/run_metadata.json" + run_path = sys.argv[1] execute(run_path, debug_plots=False) diff --git a/samples/basin_analysis/make_runs.py b/samples/basin_analysis/make_runs.py new file mode 100644 index 0000000..f644065 --- /dev/null +++ b/samples/basin_analysis/make_runs.py @@ -0,0 +1,41 @@ +import os +import json + + +def create_project(data_path, run_path, geom_type): + template = { + "base_dir": data_path, + "meta_path": os.path.join(data_path, "reach_meta.csv"), + "lateral_path": os.path.join(data_path, "laterals.csv"), + "inflow_path": os.path.join(data_path, "inflows.csv"), + "init_conditions_path": os.path.join(data_path, "init_conditions.csv"), + "geometry_dir": "/netfiles/ciroh/floodplainsData/runs/NWM/geometry", + "geometry_source": geom_type.upper(), + "dt": "5m", + "optimize_dx": False, + "conserve_mass": False, + "short_ts": True, + "lat_addition": "top", + "id_field": "comid", + "to_id_field": "ds_comid", + "mode": 'basin', + "out_dir": os.path.join(data_path, f'{geom_type.lower()}_outputs'), + } + os.makedirs(template['out_dir'], exist_ok=True) + with open(run_path, 'w') as f: + json.dump(template, f, indent=4) + +base_dir = r"/netfiles/ciroh/floodplainsData/retrospective" +runs_path = os.path.join(base_dir, 'basin_run_files') +catchments = ['lamoille', 'lewis', 'mettawee', 'missisquoi', 'otter', 'winooski'] +for catchment in catchments: + tmp_dir = os.path.join(base_dir, catchment) + folders = [f for f in os.listdir(tmp_dir) if os.path.isdir(os.path.join(tmp_dir, f))] + for f in folders: + data_path = os.path.join(tmp_dir, f) + geom_type = 'nwm' + run_path = os.path.join(runs_path, f'{catchment}_{f}_{geom_type}.json') + create_project(data_path, run_path, geom_type) + geom_type = 'hand' + run_path = os.path.join(runs_path, f'{catchment}_{f}_{geom_type}.json') + create_project(data_path, run_path, geom_type) diff --git a/samples/basin_analysis/preprocessing.py b/samples/basin_analysis/preprocessing.py new file mode 100644 index 0000000..2540c1d --- /dev/null +++ b/samples/basin_analysis/preprocessing.py @@ -0,0 +1,222 @@ +import pandas as pd +import numpy as np +import json +import os +from 
muskingumcunge.controller import create_project +import netCDF4 as nc + + +# Load data +# paths = r"/users/k/l/klawson1/netfiles/ciroh/floodplainsData/retrospective/lamoille/2002/mc_run.json" +# paths = r"/users/k/l/klawson1/netfiles/ciroh/floodplainsData/retrospective/lamoille/2019/mc_run.json" +paths = r"/users/k/l/klawson1/netfiles/ciroh/floodplainsData/retrospective/otter/2011/mc_run.json" +# paths = r"/users/k/l/klawson1/netfiles/ciroh/floodplainsData/retrospective/otter/2019/mc_run.json" +# paths = r"/users/k/l/klawson1/netfiles/ciroh/floodplainsData/retrospective/lewis/1996/mc_run.json" + + +def clean(df): + if 'Unnamed: 0' in df.columns: + df = df.drop(columns='Unnamed: 0') + if 'datetime' in df.columns: + df = df[df['datetime'].notnull()] + df["datetime"] = df["datetime"].map(lambda x: x + " 00:00:00" if len(x) == 10 else x) + df["datetime"] = pd.to_datetime(df["datetime"]) + df = df.set_index("datetime") + if 'NA' in df.columns: + df = df.drop(columns='NA') + return df + +def find_root(reaches): + dataset = nc.Dataset(param_path) + comids = np.array(dataset.variables['link'][:]) + to = np.array(dataset.variables['to'][:]) + outflow_dict = {us: ds for us, ds in zip(comids, to)} + r1 = reaches[0] # just need to trace one reach to outlet + outflow = outflow_dict[r1] + while outflow in reaches: + r1 = outflow + outflow = outflow_dict[r1] + return r1 + +def extract_from_root(root): + print(f'Extracting data from root {root}') + print(f'Loading parameters from {param_path}') + dataset = nc.Dataset(param_path) + comids = np.array(dataset.variables['link'][:]) + to = np.array(dataset.variables['to'][:]) + + print('traversing network...') + keep = list() + q = [root] + while len(q) > 0: + cur = q.pop() + children = comids[to == cur] + q.extend(children) + keep.extend(children) + + print('extracting subset...') + indices = np.where(np.isin(comids, keep)) + data = {} + for var_name in ['link', 'to', 'n', 'nCC', 'order', 'Length', 'So', 'ChSlp', 'BtmWdth', 'TopWdthCC', 'TopWdth', 'NHDWaterbodyComID', 'MusK', 'MusX', 'Kchan']: + var_data = dataset.variables[var_name][:] + if var_data.ndim == 0: + continue + elif var_data.ndim > 1: + data[var_name] = var_data[indices, :] + else: + data[var_name] = var_data[indices] + + # Close the dataset + dataset.close() + + print('creating DataFrame...') + # Create a DataFrame from the dictionary + df = pd.DataFrame(data) + return df + + +run_dir = r'/netfiles/ciroh/floodplainsData/retrospective/basin_run_files/MSQ' +all_paths = [os.path.join(run_dir, f) for f in os.listdir(run_dir) if f.endswith('.json')] +# all_paths = [r'/netfiles/ciroh/floodplainsData/retrospective/basin_run_files/lamoille_2019_hand.json'] + + +for paths in all_paths: + print(f'Processing {paths}') + + # Establish file structure and paths + if not os.path.exists(paths): + create_project(paths) # Optional make template + with open(paths, 'r') as f: + paths = json.load(f) + lateral_path = paths['lateral_path'] + inflow_path = paths['inflow_path'] + out_path = paths['meta_path'] + geom_dir = paths['geometry_dir'] + + ds_hydrographs = os.path.join(paths['base_dir'], 'downstream_hydrographs.csv') + head_path = os.path.join(paths['base_dir'], 'headwaters.csv') + lake_path = os.path.join(paths['base_dir'], 'lake.csv') + + meta_path = r"/netfiles/ciroh/floodplainsData/runs/NWM/network/reach_data.csv" + param_path = r"/netfiles/ciroh/floodplainsData/runs/NWM/working/RouteLink_CONUS.nc" + da_path = r"/netfiles/ciroh/floodplainsData/runs/NWM/working/drain_areas.csv" + # if 
os.path.exists(out_path): + # print(f'{out_path} already exists, skipping') + # continue + + # Load data + ds = pd.read_csv(ds_hydrographs) # treating this as the model domain + ds = clean(ds) + reaches = [int(i) for i in ds.columns] + root = find_root(reaches) + meta_df = extract_from_root(root) + meta_df = meta_df.sort_values("link") + meta_df = meta_df.rename(columns={"link": paths['id_field'], "to": paths['to_id_field'], 'Length': 'length', 'So': 'slope'}) + meta_df = meta_df.drop_duplicates() # remove completely duplicated rows + meta_df = meta_df.set_index("comid") + + # load DAs + das = pd.read_csv(da_path) + das = das.set_index("ID") + meta_df = meta_df.join(das, how='left') + replace_nan = lambda x: np.nanmean(meta_df['TotDASqKm']) if np.isnan(x) else x + meta_df['TotDASqKm'] = meta_df['TotDASqKm'].map(replace_nan) # maybe should update later + + # Remove divergences by keeping largest connection first + count = len(meta_df) + meta_df = meta_df.sort_values('order', ascending=False) + meta_df = meta_df[~meta_df.index.duplicated(keep='first')].copy().sort_index() + print(f'Removed {count - len(meta_df)} divergences') + + # Check for single root + roots = list(set(meta_df['ds_comid']).difference(set(meta_df.index))) + if len(roots) > 1: + raise RuntimeError(f'Multiple roots detected: {roots}') + + # Handle missing data by grabbing next ds reach + missing = meta_df[meta_df[['n', 'nCC', 'slope', 'ChSlp', 'BtmWdth', 'TopWdthCC', 'TopWdth', 'TotDASqKm']].isnull().values.any(axis=1)].index + print(f'Handling missing data for {len(missing)} reaches') + print(missing) + for col in missing: + isna = True + ds = col + while isna: + ds = meta_df.loc[ds, 'ds_comid'] + if ds in missing: + continue + else: + isna = False + if ds == roots[0]: + meta_df.loc[col, ['n', 'ncc', 'TotDASqKm', 'slope']] = [0.06, 0.12, meta_df['TotDASqKm'].max(), np.exp(np.mean(np.log(meta_df['slope'])))] + else: + meta_df.loc[col, ['n', 'ncc', 'TotDASqKm', 'slope']] = meta_df.loc[ds, ['n', 'ncc', 'TotDASqKm', 'slope']] + meta_df.loc[col, 'length'] = meta_df['length'].mean() + + # Add lakes to headwater forcings + try: + lakes = meta_df['NHDWaterbodyComID'].unique() + lakes = lakes[lakes != -9999] + force_lakes = list() + for l in lakes: + # Find root of each lake + lake_reaches = list(meta_df[meta_df['NHDWaterbodyComID'] == l].index) + lake_root = lake_reaches[0] # initialize + outflow = meta_df.loc[lake_root, 'ds_comid'] + while outflow in lake_reaches: + lake_root = meta_df.loc[lake_root, 'ds_comid'] # update lake root + outflow = meta_df.loc[lake_root, 'ds_comid'] + force_lakes.append(str(int(lake_root))) + except: + force_lakes = list() + + # Prune upstream of headwater forcings + head_df = pd.read_csv(head_path) # reaches we don't want to route through + tmp_cols = list(head_df.columns) + tmp_cols.remove('Unnamed: 0') + tmp_cols.remove('datetime') + heads = [float(c) for c in tmp_cols] + small = list(meta_df[meta_df['TotDASqKm'] < 5.2].index) # also treat small reaches as headwaters + heads.extend(small) + heads.extend([float(i) for i in force_lakes]) + prune_counter = 0 + for h in heads: + us = list(meta_df[meta_df['ds_comid'] == h].index) + while len(us) > 0: + tmp = us.pop() + us.extend(list(meta_df[meta_df['ds_comid'] == tmp].index)) + meta_df = meta_df.drop(index=tmp).copy() + prune_counter += 1 + print(f'Removed {prune_counter} reaches to align with headwater forcings') + + # Check that all final network have valid geometry + geom_area_path = os.path.join(geom_dir, 'area.csv') + geom_area = 
pd.read_csv(geom_area_path) + valid_geom = geom_area.columns[~(geom_area == 0).all(axis=0)].astype(int) + missing_geom = list(set(meta_df.index).difference(set(valid_geom))) + if len(missing_geom) > 0: + print(f'Missing geometry for {len(missing_geom)} reaches') + print(missing_geom) + meta_df['valid_geom'] = meta_df.index.isin(valid_geom) + + # make an inflows file + headwaters = list(set(meta_df.index).difference(set(meta_df['ds_comid']))) + headwaters = [str(h) for h in headwaters] + try: + inflow_df = ds[headwaters].copy() + except KeyError: + missing = list(set(headwaters).difference(set(ds.columns))) + missing_ds = pd.DataFrame(index=missing, columns=['missing_ds']) + missing_ds['missing_ds'] = True + missing_ds.to_csv(os.path.join(paths['base_dir'], 'missing_ds.csv')) + raise RuntimeError(f'Missing downstream reaches in {ds_hydrographs}') + inflow_df.to_csv(inflow_path) + + # Make an initial conditions file + init_flow = pd.DataFrame(ds.loc[ds.index[0], meta_df.index.astype(str)]) + init_flow = init_flow.rename(columns={ds.index[0]: 'init_discharge'}) + init_flow.to_csv(paths['init_conditions_path']) + + # Export metadata + meta_df['comid'] = meta_df.index + meta_df = meta_df[['comid', 'ds_comid', 'n', 'nCC', 'order', 'TotDASqKm', 'length', 'slope', 'ChSlp', 'BtmWdth', 'TopWdthCC', 'TopWdth', 'NHDWaterbodyComID', 'MusK', 'MusX', 'Kchan', 'valid_geom']] + meta_df.to_csv(out_path, index=False) diff --git a/samples/basin_analysis/run_all_basinwide.py b/samples/basin_analysis/run_all_basinwide.py new file mode 100644 index 0000000..045dd12 --- /dev/null +++ b/samples/basin_analysis/run_all_basinwide.py @@ -0,0 +1,13 @@ +import os +import json +from muskingumcunge.controller import handler +import numpy as np + +run_dir = r'/netfiles/ciroh/floodplainsData/retrospective/basin_run_files' +all_paths = [os.path.join(run_dir, f) for f in os.listdir(run_dir) if f.endswith('.json')] + +for path in all_paths: + with open(path, 'r') as f: + dict = json.load(f) + + handler(path) \ No newline at end of file diff --git a/samples/simple_routing/simple_routing.py b/samples/simple_routing/simple_routing.py index d0b22e7..b5cefe1 100644 --- a/samples/simple_routing/simple_routing.py +++ b/samples/simple_routing/simple_routing.py @@ -16,7 +16,7 @@ reach = TrapezoidalReach(100, 100, 0.1, 0.001, 2000) # Route flows -outflows = reach.route_hydrograph(inflows, 0.1) +outflows = reach.route_hydrograph(inflows, 360, solver='fread') # Plot results fig, ax = plt.subplots() @@ -26,4 +26,5 @@ ax.set_ylabel('Flowrate(cms)') plt.legend() plt.tight_layout() -plt.show() \ No newline at end of file +plt.savefig('simple_routing.png', dpi=300) +# plt.show() \ No newline at end of file diff --git a/src/muskingumcunge/controller.py b/src/muskingumcunge/controller.py new file mode 100644 index 0000000..ed35f95 --- /dev/null +++ b/src/muskingumcunge/controller.py @@ -0,0 +1,239 @@ +import os +import sys +import json +import pandas as pd +import numpy as np +from muskingumcunge.reach import CompoundReach, CustomReach, MuskingumReach, WRFCompound +from muskingumcunge.network import Network + + +def create_project(path): + template = { + "base_dir": os.path.dirname(path), + "meta_path": os.path.join(os.path.dirname(path), "reach_meta.csv"), + "lateral_path": os.path.join(os.path.dirname(path), "laterals.csv"), + "head_path": os.path.join(os.path.dirname(path), "headwaters.csv"), + "lake_path": os.path.join(os.path.dirname(path), "lake.csv"), + "geometry_dir": "", # expecting the format of the geometry folder generated by 
https://github.com/CIROH-UVM/floodplain-routing + "geometry_source": "NWM", + "optimize_dx": False, + "conserve_mass": False, + "short_ts": False, + "dt": "5m", + "lat_addition": "top", + "id_field": "comid", + "to_id_field": "ds_comid", + "mode": 'basin', + "out_dir": os.path.join(os.path.dirname(path), 'outputs'), + } + os.makedirs(template['out_dir'], exist_ok=True) + with open(path, 'w') as f: + json.dump(template, f, indent=4) + +def clean_ts(df): + if 'Unnamed: 0' in df.columns: + df = df.drop(columns='Unnamed: 0') + if 'datetime' in df.columns: + df = df[df['datetime'].notnull()] + df["datetime"] = df["datetime"].map(lambda x: x + " 00:00:00" if len(x) == 10 else x) + df["datetime"] = pd.to_datetime(df["datetime"]) + df = df.set_index("datetime") + df = df.astype(np.float64) + if 'NA' in df.columns: + df = df.drop(columns='NA') + return df + +def load_geom(meta_df, source='NWM', geom_dir=None): + reaches = dict() + missing_geom = list() + if source == 'HAND': + hand_tw = pd.read_csv(os.path.join(geom_dir, 'area.csv')) + hand_el = pd.read_csv(os.path.join(geom_dir, 'el.csv')) + hand_wp = pd.read_csv(os.path.join(geom_dir, 'p.csv')) + hand_ar = pd.read_csv(os.path.join(geom_dir, 'vol.csv')) + for r in list(meta_df.index): + ch_n = meta_df.loc[r, 'n'] + fp_n = meta_df.loc[r, 'nCC'] + slope = meta_df.loc[r, 'slope'] + length = meta_df.loc[r, 'length'] + da = meta_df.loc[r, 'TotDASqKm'] + + tw = meta_df.loc[r, 'TopWdth'] + bw = meta_df.loc[r, 'BtmWdth'] + z = 2 / (meta_df.loc[r, 'ChSlp']) + tw_cc = meta_df.loc[r, 'TopWdthCC'] + bf = (tw - bw) / (z) + + if source == 'NWM': + reaches[r] = WRFCompound(bw, z, bf, tw_cc, ch_n, fp_n, slope, length, max_stage=100*bf, stage_resolution=10000) + elif source == 'NWM_Regression': + tw = 2.44 * (da ** 0.34) + a_ch = 0.75 * (da ** 0.53) + bf = (a_ch / tw) * 1.25 + bw = ((2 * a_ch) / bf) - tw + z = (tw - bw) / bf + tw_cc = 3 * tw + reaches[r] = WRFCompound(bw, z, bf, tw_cc, ch_n, fp_n, slope, length, max_stage=12*bf, stage_resolution=1000) + elif source == 'HAND': + try: + tw = hand_tw[r].to_numpy() / length + if np.all(tw == 0): + raise RuntimeError('All top-width values are zero') + wp = hand_wp[r].to_numpy() / length + ar = hand_ar[r].to_numpy() / length + el = hand_el[r].to_numpy() + n = np.ones(el.shape) * fp_n + n[el < bf] = ch_n + n[el >= bf] = fp_n + reaches[r] = CustomReach(n, slope, length, el, tw, ar, wp) + except Exception as e: + # Error catching. Default to NWM channel + print(f'Error loading HAND data for reach {r}. 
Defaulting to NWM channel') + print(f'Error: {e}') + missing_geom.append(r) + reaches[r] = WRFCompound(bw, z, bf, tw_cc, ch_n, fp_n, slope, length, max_stage=12*bf, stage_resolution=len(hand_tw)) + elif source == 'MUSKINGUM': + x = meta_df.loc[r, 'MusX'] + k = meta_df.loc[r, 'MusK'] + reaches[r] = MuskingumReach(k, x) + + if len(missing_geom) > 0: + print(f'Error loading {len(missing_geom)} / {len(reaches)} reaches') + print(missing_geom) + + return reaches, missing_geom + +def execute_by_reach(meta_path): + # Load data + with open(meta_path, 'r') as f: + paths = json.load(f) + + meta_df = pd.read_csv(paths['meta_path']) + meta_df['comid'] = meta_df['comid'].astype(int).astype(str) + meta_df['ds_comid'] = meta_df['ds_comid'].astype(int).astype(str) + meta_df = meta_df.set_index("comid") + + upstream = pd.read_csv(os.path.join(paths['base_dir'], 'upstream_hydrographs.csv')) + upstream = clean_ts(upstream) + upstream = upstream.reindex(sorted(upstream.columns), axis=1) + + # Set up reaches + reaches = load_geom(meta_df, source=paths['geometry_source'], geom_dir=paths['geometry_dir']) + + # export celerity curves + stages = {r: reaches[r].geometry['stage'] for r in reaches} + stage_df = pd.DataFrame().from_dict(stages, orient='columns') + stage_df.to_csv(os.path.join(paths['out_dir'], "stage.csv")) + celerities = {r: reaches[r].geometry['celerity'] for r in reaches} + celerity_df = pd.DataFrame().from_dict(celerities, orient='columns') + celerity_df.to_csv(os.path.join(paths['out_dir'], "celerity.csv")) + discharge = {r: reaches[r].geometry['discharge'] for r in reaches} + discharge_df = pd.DataFrame().from_dict(discharge, orient='columns') + discharge_df.to_csv(os.path.join(paths['out_dir'], "discharge.csv")) + + # Run + out_df = pd.DataFrame(index=upstream.index, columns=upstream.columns) + stage_df = pd.DataFrame(index=upstream.index, columns=upstream.columns) + dt = (upstream.index[1] - upstream.index[0]).seconds / 3600 + counter = 1 + for r in upstream.columns: + if counter % 100 == 0: + print(f"{counter} / {len(upstream.columns)}") + try: + reach = reaches[r] + outflow = reach.route_hydrograph(upstream[r].to_numpy(), dt) + except Exception as e: + print(f"Error routing reach {r}: ({type(e).__name__}) {e}") + outflow = np.zeros(len(upstream)) + out_df[r] = outflow + stage_df[r] = np.interp(outflow, reach.geometry['discharge'], reach.geometry['stage']) + + os.makedirs(paths['out_dir'], exist_ok=True) + out_df.to_csv(os.path.join(paths['out_dir'], "model_outflows.csv")) + stage_df.to_csv(os.path.join(paths['out_dir'], "model_outflows_stage.csv")) + +def execute(meta_path): + # Load data + with open(meta_path, 'r') as f: + paths = json.load(f) + + meta_df = pd.read_csv(paths['meta_path']) + meta_df['comid'] = meta_df['comid'].astype(int).astype(str) + meta_df['ds_comid'] = meta_df['ds_comid'].astype(int).astype(str) + meta_df = meta_df.set_index("comid") + edge_dict = meta_df['ds_comid'].to_dict() + + lateral_df = pd.read_csv(paths['lateral_path']) + lateral_df = clean_ts(lateral_df) + lateral_df = lateral_df.reindex(sorted(lateral_df.columns), axis=1) + + inflow_df = pd.read_csv(paths['inflow_path']) + inflow_df = clean_ts(inflow_df) + inflow_df = inflow_df.reindex(sorted(inflow_df.columns), axis=1) + + # resample to 5 minute increments + resample_dt = pd.Timedelta(paths['dt']) + lateral_df = lateral_df.resample(resample_dt).ffill() + inflow_df = inflow_df.resample(resample_dt).ffill() + + # Error checking + missing_forcings = list(set(meta_df.index).difference(set(lateral_df.columns))) + 
print(f'Missing forcing data for: {missing_forcings}') + + # Set up reaches + reaches, missing_geom = load_geom(meta_df, source=paths['geometry_source'], geom_dir=paths['geometry_dir']) + geom_status_df = pd.DataFrame(index=meta_df.index) + geom_status_df['valid_geom'] = ~meta_df.index.isin(missing_geom) + try: + geom_status_df.to_csv(os.path.join(paths['out_dir'], "geom_status.csv")) + except PermissionError: + print("Error writing geom_status.csv. Check permissions") + + # export celerity curves + stages = {r: reaches[r].geometry['stage'] for r in reaches if type(reaches[r]) == CustomReach} + stage_df = pd.DataFrame().from_dict(stages, orient='columns') + stage_df.to_csv(os.path.join(paths['out_dir'], "stage.csv")) + celerities = {r: reaches[r].geometry['celerity'] for r in reaches if type(reaches[r]) == CustomReach} + celerity_df = pd.DataFrame().from_dict(celerities, orient='columns') + celerity_df.to_csv(os.path.join(paths['out_dir'], "celerity.csv")) + discharge = {r: reaches[r].geometry['discharge'] for r in reaches if type(reaches[r]) == CustomReach} + discharge_df = pd.DataFrame().from_dict(discharge, orient='columns') + discharge_df.to_csv(os.path.join(paths['out_dir'], "discharge.csv")) + + # Set up run + network = Network(edge_dict) + network.load_forcings(lateral_df) + network.load_headwater_forcings(inflow_df) + network.load_reaches(reaches) + + # Set up initial outflows + init_outflows = pd.read_csv(paths['init_conditions_path']) + init_outflows = init_outflows.set_index('comid') + init_outflows = {str(r): init_outflows.loc[r, 'init_discharge'] for r in init_outflows.index} + for r in init_outflows: + if np.isnan(init_outflows[r]): + init_outflows[r] = None + network.load_initial_conditions(init_outflows) # kind of unnecessary + + # Run model + network.run_event(optimize_dx=paths['optimize_dx'], conserve_mass=paths['conserve_mass'], lat_addition=paths['lat_addition'], short_ts=paths['short_ts']) + os.makedirs(paths['out_dir'], exist_ok=True) + out_path = os.path.join(paths['out_dir'], "model_outflows.csv") + network.out_df.to_csv(out_path) + out_path = os.path.join(paths['out_dir'], "model_outflows_stage.csv") + network.stage_df.to_csv(out_path) + + return network + +def handler(path): + with open(path, 'r') as f: + dict = json.load(f) + + if dict['mode'] == 'reach': + execute_by_reach(path) + elif dict['mode'] == 'basin': + execute(path) + +if __name__ == '__main__': + path = sys.argv[1] + handler(path) \ No newline at end of file diff --git a/src/muskingumcunge/network.py b/src/muskingumcunge/network.py new file mode 100644 index 0000000..ce08449 --- /dev/null +++ b/src/muskingumcunge/network.py @@ -0,0 +1,141 @@ +from collections import defaultdict +import pandas as pd +import numpy as np +import time + + +class Network: + + def __init__(self, edge_dict): + self.edge_dict = edge_dict # key=u/s, value=d/s + self.chlid_dict = defaultdict(list) + for u, v in edge_dict.items(): + self.chlid_dict[v].append(u) + + roots = set(edge_dict.values()).difference(set(edge_dict.keys())) + if len(roots) > 1: + raise ValueError(f'Multiple roots detected: {roots}') + self.root = set(edge_dict.values()).difference(set(edge_dict.keys())).pop() + self.headwaters = list() + self.post_order = list() + self.calculate_post_order() + self.reach_dict = None + self.forcing_df = None + self.channel_outflows = {k: None for k in self.edge_dict.keys()} + self.channel_outflows_stage = {k: None for k in self.edge_dict.keys()} + self.init_outflows = {k: None for k in self.edge_dict.keys()} + self.out_df = 
None + + def calculate_post_order(self): + self.headwaters = list() + self.post_order = list() + q = [self.root] + working = True + while working: + cur = q.pop() + children = self.chlid_dict[cur] + if len(children) == 0: + self.post_order.append(cur) + self.headwaters.append(cur) + elif all([c in self.post_order for c in children]): + self.post_order.append(cur) + else: + q.append(cur) + q.extend(children) + if len(q) == 0: + working = False + self.post_order = self.post_order[:-1] # remove root + + def export_post_order(self, path): + out_df = pd.DataFrame(self.post_order, columns=['comid']) + out_df['order'] = range(1, len(out_df) + 1) + out_df.to_csv(path, index=False) + + def load_reaches(self, reach_dict): + self.reach_dict = reach_dict # Todo: add informational note about whether all reaches in post order have been loaded + + def load_forcings(self, forcing_df): + self.forcing_df = forcing_df + self.forcing_df = self.forcing_df[self.forcing_df.columns.intersection(self.post_order)].copy() + + def load_headwater_forcings(self, headwater_forcings): + headwater_forcings = headwater_forcings.fillna(0) + missing = list(set(self.headwaters).difference(set(headwater_forcings.columns))) + print(f"Missing headwater forcings for: {missing}") + for n in headwater_forcings.columns: + self.channel_outflows[n] = headwater_forcings[n].to_numpy() + self.headwaters.append(n) + for n in missing: + self.channel_outflows[n] = np.zeros(len(self.forcing_df)) + + def load_initial_conditions(self, initial_conditions): + # loads initial outflows for each reach + self.init_outflows = initial_conditions + + def run_event(self, optimize_dx=True, conserve_mass=False, lat_addition='middle', short_ts=False): + # calculate total inflow + t_start = time.perf_counter() + dt = (self.forcing_df.index[1] - self.forcing_df.index[0]).seconds + counter = 0 + pct5 = int(len(self.post_order) / 20) + 1 + for node in self.post_order: + counter += 1 + if counter % pct5 == 0: + pct_done = int(100 * counter / len(self.post_order)) + print(f'{pct_done}% done') + + reach = self.reach_dict[node] + if node in self.headwaters: + continue + else: + children = self.chlid_dict[node] + us_hydro = [self.channel_outflows[c] for c in children] + us_hydro = np.sum(us_hydro, axis=0) + + laterals = self.forcing_df[node].to_numpy() + if lat_addition == 'top': + us_hydro = us_hydro + laterals + + if optimize_dx: + dx, subreaches = reach.optimize_route_params(us_hydro, dt) + reach.reach_length = dx + else: + subreaches = 1 + + routed = us_hydro + for i in range(subreaches): + try: + if lat_addition == 'middle': + l = laterals + else: + l = None + if i == 0 and node in self.init_outflows: + init_out = self.init_outflows[node] + else: + init_out = None + routed = reach.route_hydrograph(routed, dt, lateral=l, initial_outflow=init_out, short_ts=short_ts, solver='wrf-c') + except AssertionError as e: + print(f"Error routing {node}: {e}") + + if lat_addition == 'bottom': + routed = routed + laterals + + # enforce mass conservation + if conserve_mass: + in_flow = us_hydro.sum() + laterals.sum() + out_flow = routed.sum() + routed = routed * (in_flow / out_flow) + self.channel_outflows[node] = routed + self.channel_outflows_stage[node] = np.interp(routed, reach.geometry['discharge'], reach.geometry['stage']) + + print(f'Routing complete in {round(time.perf_counter() - t_start, 1)} seconds') + + # Post-processing + out_df = pd.DataFrame(self.channel_outflows) + out_df['dt'] = self.forcing_df.index + self.out_df = out_df.set_index('dt') + stage_df = 
pd.DataFrame(self.channel_outflows_stage) + stage_df['dt'] = self.forcing_df.index + self.stage_df = stage_df.set_index('dt') + + diff --git a/src/muskingumcunge/reach.py b/src/muskingumcunge/reach.py index 66f18ef..ff6ad78 100644 --- a/src/muskingumcunge/reach.py +++ b/src/muskingumcunge/reach.py @@ -1,19 +1,8 @@ import numpy as np from scipy.ndimage import gaussian_filter1d -from .route import croute +from .route import croute_fread, croute_wrf class BaseReach: - geometry = {'stage': None, - 'top_width': None, - 'area': None, - 'wetted_perimeter': None, - 'hydraulic_radius': None, - 'mannings_n': None} - rating_curve = {'stage': None, - 'discharge': None} - muskingum_params = {'stage': None, - 'k': None, - 'x': None} def __init__(self, width, mannings_n, slope, reach_length, max_stage=10, stage_resolution=50): self.width = width @@ -48,104 +37,237 @@ def calculate_x_ref(self, inflows, dt): c_ref = np.interp(np.log(q_ref), self.geometry['log_q'], self.geometry['celerity']) x_ref = 0.5 * ((c_ref * dt * 60 * 60) + (q_ref / (b_ref * self.slope * c_ref))) return x_ref + + def route_hydrograph(self, inflows, dt, lateral=None, initial_outflow=None, short_ts=False, solver='wrf-c'): + assert np.all(inflows >= 0), 'Negative inflows detected' + assert np.all(~np.isnan(inflows)), 'NaN inflows detected' + # initialize outflow array + outflows = np.zeros(inflows.shape) + if initial_outflow is not None: + outflows[0] = initial_outflow + else: + outflows[0] = inflows[0] + + # initialize lateraals if none + if lateral is None: + lateral = np.zeros_like(inflows) + + # check if rating curve covers range of inflows. If not, add more points + if max(inflows) > max(self.geometry['discharge']): + add_res = 200 + add_stages = np.linspace(0, 9 * self.geometry['stage'][-1], add_res) + add_widths = np.repeat(self.geometry['top_width'][-1], add_res) + add_areas = (add_stages * add_widths) + self.geometry['area'][-1] + add_wp = (2 * add_stages) + self.geometry['wetted_perimeter'][-1] + add_hr = add_areas / add_wp + add_n = np.repeat(self.geometry['mannings_n'][-1], add_res) + add_q = (1 / add_n) * add_areas * (add_hr ** (2 / 3)) * (self.slope ** 0.5) + dpdy = np.repeat(2, add_res) + k_prime = (5 / 3) - ((2 / 3) * (add_areas / (add_widths * add_wp)) * dpdy) + add_c = k_prime * (add_q / add_areas) + + self.geometry['stage'] = np.append(self.geometry['stage'], add_stages + self.geometry['stage'][-1]) + self.geometry['top_width'] = np.append(self.geometry['top_width'], add_widths) + self.geometry['area'] = np.append(self.geometry['area'], add_areas) + self.geometry['wetted_perimeter'] = np.append(self.geometry['wetted_perimeter'], add_wp) + self.geometry['hydraulic_radius'] = np.append(self.geometry['hydraulic_radius'], add_hr) + self.geometry['mannings_n'] = np.append(self.geometry['mannings_n'], add_n) + self.geometry['discharge'] = np.append(self.geometry['discharge'], add_q) + self.geometry['celerity'] = np.append(self.geometry['celerity'], add_c) + self.geometry['log_q'] = np.log(self.geometry['discharge']) + self.geometry['log_width'] = np.log(self.geometry['top_width']) + + assert max(inflows) < max(self.geometry['discharge']), 'Rating Curve does not cover range of flowrates in hydrograph' - def route_hydrograph_c(self, inflows, dt): - reach_length = self.reach_length - slope = self.slope - geometry = self.geometry + # initialize secant method + depth = np.interp(outflows[0], self.geometry['discharge'], self.geometry['stage']) - return croute(inflows, dt, reach_length, slope, geometry) + if solver == 'wrf': + 
outflows = self.route_hydrograph_wrf(inflows, outflows, lateral, depth, self.reach_length, dt, short_ts) + elif solver == 'wrf-c': + outflows = croute_wrf(inflows, outflows, lateral, depth, self.geometry['stage'], self.geometry['celerity'], self.geometry['top_width'], self.geometry['discharge'], self.slope, self.reach_length, dt, short_ts) + elif solver == 'fread': + outflows = self.route_hydrograph_fread(inflows, outflows, lateral, dt, short_ts) + elif solver == 'fread-c': + inflows = inflows + lateral # did not program middle addition for fread + outflows = croute_fread(inflows, outflows, dt, self.reach_length, self.slope, self.geometry, short_ts) - def route_hydrograph(self, inflows, dt, max_iter=1000): - outflows = list() - outflows.append((inflows[0])) - assert max(inflows) < max(self.geometry['discharge']), 'Rating Curve does not cover range of flowrates in hydrograph' - + return np.array(outflows) + + def route_hydrograph_fread(self, inflows, outflows, lateral, dt, short_ts): + max_iter = 1000 for i in range(len(inflows) - 1): - q_guess = sum([inflows[i], inflows[i + 1], outflows[i]]) / 3 + if short_ts: + us_cur = np.copy(inflows[i]) + else: + us_cur = np.copy(inflows[i + 1]) + q_guess = sum([inflows[i], us_cur, outflows[i], lateral[i]]) / 3 last_guess = q_guess * 2 counter = 1 - # while abs(last_guess - q_guess) > 0.003: # from handbook of hydrology page 328 - while counter < 2: + while abs(last_guess - q_guess) > 0.003: # from handbook of hydrology page 328 counter += 1 last_guess = q_guess.copy() - reach_q = sum([inflows[i], inflows[i + 1], outflows[i], q_guess]) / 4 + reach_q = sum([inflows[i], us_cur, outflows[i], q_guess]) / 4 # Interpolate log_reach_q = np.log(reach_q) b_tmp = np.exp(np.interp(log_reach_q, self.geometry['log_q'], self.geometry['log_width'])) c_tmp = np.interp(log_reach_q, self.geometry['log_q'], self.geometry['celerity']) - courant = c_tmp * dt * 60 * 60 / self.reach_length + courant = c_tmp * dt/ self.reach_length reynold = reach_q / (self.slope * c_tmp * self.reach_length * b_tmp) c0 = (-1 + courant + reynold) / (1 + courant + reynold) c1 = (1 + courant - reynold) / (1 + courant + reynold) c2 = (1 - courant + reynold) / (1 + courant + reynold) + c3 = (2 * courant) / (1 + courant + reynold) - k = self.reach_length / c_tmp - x = 0.5 * (1 - reynold) - - q_guess = (c0 * inflows[i + 1]) + (c1 * inflows[i]) + (c2 * outflows[i]) + q_guess = (c0 * us_cur) + (c1 * inflows[i]) + (c2 * outflows[i]) + (c3 * lateral[i]) q_guess = max(min(inflows), q_guess) + if counter == max_iter: last_guess = q_guess - outflows.append(q_guess) + outflows[i + 1] = q_guess return np.array(outflows) + - def route_hydrograph_mct(self, inflows, dt, max_iter=1000): - outflows = list() - outflows.append(inflows[0]) - assert max(inflows) < max(self.geometry['discharge']), 'Rating Curve does not cover range of flowrates in hydrograph' - - # warm up parameters - qref1 = (inflows[0] + outflows[0]) / 2 - aref1 = np.exp(np.interp(np.log(qref1), self.geometry['log_q'], self.geometry['log_area'])) - bref1 = np.exp(np.interp(np.log(qref1), self.geometry['log_q'], self.geometry['log_width'])) - cref1 = np.interp(np.log(qref1), self.geometry['log_q'], self.geometry['celerity']) - betaref1 = (cref1 * aref1) / qref1 - courantref1 = (cref1 / betaref1) * (dt * 60 * 60 / self.reach_length) - reynoldref1 = qref1 / (betaref1 * bref1 * self.slope * cref1 * self.reach_length) + def route_hydrograph_wrf(self, inflows, outflows, lateral, depth, dx, dt, short_ts): + # local variables + mindepth = 0.01 + max_iter = 
100 + for i in range(len(inflows) - 1): - q_guess = outflows[i] + (inflows[i + 1] - inflows[i]) - last_guess = q_guess * 2 - counter = 1 - while abs(last_guess - q_guess) > 0.003: # from handbook of hydrology page 328 - counter += 1 - last_guess = q_guess.copy() + if not ((inflows[i] > 0.0) or (inflows[i + 1] > 0.0) or (outflows[i] > 0.0)): + depth = 0.0 + qdc = 0.0 + outflows[i + 1] = qdc + continue + Qj_0 = 0.0 + iter = 0 + rerror = 1.0 + aerror = 0.01 + tries = 0 + + h = (depth * 1.33) + mindepth + h_0 = (depth * 0.67) + + while rerror > 0.01 and aerror >= mindepth and iter <= max_iter: + qup = inflows[i] + if short_ts: + quc = inflows[i] + else: + quc = inflows[i + 1] + qdp = outflows[i] + + # Lower Interval + Ck = np.interp(h_0, self.geometry['stage'], self.geometry['celerity']) + Twl = np.interp(h_0, self.geometry['stage'], self.geometry['top_width']) + q = np.interp(h_0, self.geometry['stage'], self.geometry['discharge']) + + if Ck > 0.000000: + Km = max(dt, dx / Ck) + else: + Km = dt - qref2 = (inflows[i + 1] + q_guess) / 2 - # interpolate - aref2 = np.exp(np.interp(np.log(qref2), self.geometry['log_q'], self.geometry['log_area'])) - bref2 = np.exp(np.interp(np.log(qref2), self.geometry['log_q'], self.geometry['log_width'])) - cref2 = np.interp(np.log(qref2), self.geometry['log_q'], self.geometry['celerity']) - betaref2 = (cref2 * aref2) / qref2 - courantref2 = (cref2 / betaref2) * (dt * 60 * 60 / self.reach_length) - reynoldref2 = qref2 / (betaref2 * bref2 * self.slope * cref2 * self.reach_length) - - # MCT parameters - c0 = (-1 + courantref1 + reynoldref1) / (1 + courantref2 + reynoldref2) - c1 = ((1 + courantref1 - reynoldref1) / (1 + courantref2 + reynoldref2)) * (courantref2 / courantref1) - c2 = ((1 - courantref1 + reynoldref1) / (1 + courantref2 + reynoldref2)) * (courantref2 / courantref1) - - # Estimate outflow - q_guess = (c0 * inflows[i + 1]) + (c1 * inflows[i]) + (c2 * outflows[i]) - q_guess = max(min(inflows), q_guess) - if counter == max_iter: - last_guess = q_guess - outflows.append(q_guess) - if (cref2 / betaref2) > 1: - print((cref2 / betaref2)) - qref1 = qref2 - aref1 = aref2 - bref1 = bref2 - cref1 = bref2 - betaref1 = betaref2 - courantref1 = courantref2 - reynoldref1 = reynoldref2 + if Ck > 0.0: + X = 0.5*(1-(Qj_0/(2.0*Twl*self.slope*Ck*dx))) + X = min(0.5, max(0.0, X)) + else: + X = 0.5 + + D = (Km*(1.000 - X) + dt/2.0000) + + C1 = (Km*X + dt/2.000000)/D + C2 = (dt/2.0000 - Km*X)/D + C3 = (Km*(1.00000000-X)-dt/2.000000)/D + C4 = (lateral[i]*dt)/D + Qj_0 = ((C1*qup) + (C2*quc) + (C3*qdp) + C4) - q + + # Upper Interval + Ck = np.interp(h, self.geometry['stage'], self.geometry['celerity']) + Twl = np.interp(h, self.geometry['stage'], self.geometry['top_width']) + q = np.interp(h, self.geometry['stage'], self.geometry['discharge']) + + if Ck > 0.000000: + Km = max(dt, dx / Ck) + else: + Km = dt + + if Ck > 0.0: + X = 0.5*(1-(((C1*qup)+(C2*quc)+(C3*qdp) + C4)/(2.0*Twl*self.slope*Ck*dx))) + X = min(0.5, max(0.25, X)) + else: + X = 0.5 + + D = (Km*(1.000 - X) + dt/2.0000) + + C1 = (Km*X + dt/2.000000)/D + C2 = (dt/2.0000 - Km*X)/D + C3 = (Km*(1.00000000-X)-dt/2.000000)/D + C4 = (lateral[i]*dt)/D + Qj = ((C1*qup) + (C2*quc) + (C3*qdp) + C4) - q + + # Secant Method + if Qj - Qj_0 != 0: + h_1 = h - ((Qj * (h_0 - h))/(Qj_0 - Qj)) + if h_1 < 0.0: + h_1 = h + else: + h_1 = h + + if h > 0.0: + rerror = abs((h_1 - h) / h) + aerror = abs(h_1 - h) + else: + rerror = 0.0 + aerror = 0.9 + + h_0 = max(0.0, h) + h = max(0.0, h_1) + iter += 1 + + if h < mindepth: + break + + if 
iter >= max_iter: + tries += 1 + if tries <= 4: + h = h * 1.33 + h_0 = h_0 * 0.67 + max_iter += 25 + + if ((C1*qup)+(C2*quc)+(C3*qdp) + C4) < 0: + if C4 < 0 and abs(C4) > ((C1*qup)+(C2*quc)+(C3*qdp)): + qdc = 0.0 + else: + qdc = max(((C1*qup)+(C2*quc)+C4), ((C1*qup)+(C3*qdp)+C4)) + else: + qdc = ((C1*qup)+(C2*quc)+(C3*qdp) + C4) + if qdc < 0.0: + qdc = 0.0 + outflows[i + 1] = qdc + depth = h - return np.array(outflows) + return outflows + + + def optimize_route_params(self, inflows, dt): + # Ponce method + qref = (inflows.max() + inflows.min()) / 2 + cref = np.interp(qref, self.geometry['discharge'], self.geometry['celerity']) + twref = np.interp(qref, self.geometry['discharge'], self.geometry['top_width']) + dxc = dt * cref # Courant length + dxd = (qref / twref) / (self.slope * cref) # characteristic reach length + dxmax = 0.5 * (dxc + dxd) + peak_loc = np.argmax(self.geometry['discharge'] > inflows.max()) + peak_loc = max([peak_loc, 1]) + cmax = self.geometry['celerity'][:peak_loc].max() # I think the fact that this is the max should cover us for making all stable + dxmin = cmax * (dt) + dx = max([dxmin, dxmax]) + subreaches = int(np.ceil(self.reach_length / dx)) + dx = self.reach_length / subreaches + return dx, subreaches class TrapezoidalReach(BaseReach): @@ -222,6 +344,7 @@ def generate_geometry(self): da = geom['area'][1:] - geom['area'][:-1] dq_da = dq / da geom['celerity'] = np.append(dq_da, dq_da[-1]) + geom['celerity'][geom['celerity'] < 0.0001] = 0.0001 geom['log_q'] = np.log(geom['discharge']) @@ -249,22 +372,10 @@ def generate_muskingum_params(self): c[0] = c[1] self.muskingum_params['k'] = self.reach_length / c - self.muskingum_params['k'] /= (60 * 60) # Convert seconds to hours self.muskingum_params['x'] = (1 / 2) - (self.rating_curve['discharge'] / (2 * c * self.width * self.slope * self.reach_length)) self.muskingum_params['x'][self.muskingum_params['x'] < 0] = 0 class CustomReach(BaseReach): - geometry = {'stage': None, - 'top_width': None, - 'area': None, - 'wetted_perimeter': None, - 'hydraulic_radius': None, - 'mannings_n': None} - rating_curve = {'stage': None, - 'discharge': None} - muskingum_params = {'stage': None, - 'k': None, - 'x': None} def __init__(self, mannings_n, slope, reach_length, stages, top_widths, areas, perimeters): self.mannings_n = mannings_n @@ -274,7 +385,13 @@ def __init__(self, mannings_n, slope, reach_length, stages, top_widths, areas, p self.generate_geometry(stages, top_widths, areas, perimeters) def generate_geometry(self, stages, top_widths, areas, perimeters): - geom = self.geometry + geom = {'stage': None, + 'top_width': None, + 'area': None, + 'wetted_perimeter': None, + 'hydraulic_radius': None, + 'mannings_n': None, + 'discharge': None} geom['stage'] = stages geom['top_width'] = top_widths geom['log_width'] = np.log(geom['top_width']) @@ -288,7 +405,7 @@ def generate_geometry(self, stages, top_widths, areas, perimeters): geom['mannings_n'] = np.repeat(self.mannings_n, len(stages)) geom['discharge'] = (1 / geom['mannings_n']) * geom['area'] * (geom['hydraulic_radius'] ** (2 / 3)) * (self.slope ** 0.5) geom['discharge'][geom['discharge'] <= 0] = geom['discharge'][geom['discharge'] > 0].min() - self.clean_looped_rating_curve() + geom['discharge'] = self.clean_looped_rating_curve(geom['discharge']) geom['log_q'] = np.log(geom['discharge']) dp = geom['wetted_perimeter'][1:] - geom['wetted_perimeter'][:-1] @@ -304,16 +421,17 @@ def generate_geometry(self, stages, top_widths, areas, perimeters): geom['celerity'][0] = geom['celerity'][1] 
geom['celerity'] = np.nan_to_num(geom['celerity']) ## TODO: geom['celerity'][<0] = 0 - + self.geometry = geom - def clean_looped_rating_curve(self): + def clean_looped_rating_curve(self, discharge): # Enforce monotonic discharge inreases q_last = np.nan - for ind, q in enumerate(self.geometry['discharge']): + for ind, q in enumerate(discharge): if q < q_last: - self.geometry['discharge'][ind] = q_last + discharge[ind] = q_last else: q_last = q + return discharge class MuskingumReach: @@ -321,7 +439,7 @@ def __init__(self, k, x): self.k = k self.x = x - def route_hydrograph(self, inflows, dt): + def route_hydrograph(self, inflows, dt, lateral=None): outflows = list() outflows.append((inflows[0])) @@ -340,70 +458,128 @@ class WRFCompound(BaseReach): """ def __init__(self, bottom_width, side_slope, bankfull_depth, floodplain_width, channel_n, floodplain_n, slope, reach_length, max_stage=10, stage_resolution=50): - self.bottom_width = bottom_width - self.side_slope = side_slope - self.bankfull_depth = bankfull_depth - assert floodplain_width >= self.bottom_width + (self.side_slope * self.bankfull_depth), 'floodplain width smaller than channel width at bankfull depth' - self.floodplain_width = floodplain_width - self.channel_n = channel_n - self.floodplain_n = floodplain_n + self.Bw = bottom_width + self.z = side_slope / 2 # Should be rise/run per side + self.bfd = bankfull_depth + self.TwCC = floodplain_width + self.n = channel_n + self.nCC = floodplain_n + self.So = slope self.slope = slope self.reach_length = reach_length self.max_stage = max_stage self.resolution = stage_resolution self.generate_geometry() - - def generate_geometry(self): - geom = self.geometry + def generate_geometry(self): + geom = dict() + geom = {'stage': None, + 'top_width': None, + 'area': None, + 'wetted_perimeter': None, + 'hydraulic_radius': None, + 'mannings_n': None, + 'discharge': None} + geom['stage'] = np.linspace(0, self.max_stage, self.resolution) - geom['top_width'] = self.bottom_width + (2 * geom['stage'] * self.side_slope) - geom['area'] = ((geom['top_width'] + self.bottom_width) / 2) * geom['stage'] - geom['wetted_perimeter'] = (((((geom['stage'] * self.side_slope) ** 2) + (geom['stage'] ** 2)) ** 0.5) * 2) + self.bottom_width - geom['hydraulic_radius'] = geom['area'] / geom['wetted_perimeter'] - geom['hydraulic_radius'][0] = geom['hydraulic_radius'][1] - geom['discharge'] = (1 / self.channel_n) * geom['area'] * (geom['hydraulic_radius'] ** (2 / 3)) * (self.slope ** 0.5) - geom['dpdy'] = np.repeat(np.sqrt((self.side_slope ** 2) + 1) * 2, geom['wetted_perimeter'].shape[0]) - - # Add compound channel - bankfull_indices = (geom['stage'] > self.bankfull_depth) - tw_at_bkf = self.bottom_width + (2 * self.bankfull_depth * self.side_slope) - a_at_bkf = ((self.bottom_width + tw_at_bkf) / 2) * self.bankfull_depth - p_at_bkf = (((((self.bankfull_depth * self.side_slope) ** 2) + (self.bankfull_depth ** 2)) ** 0.5) * 2) + self.bottom_width - area = a_at_bkf + (self.floodplain_width * (geom['stage'] - self.bankfull_depth)) - perimeter = p_at_bkf + (self.floodplain_width - tw_at_bkf) + (geom['stage'] - self.bankfull_depth) - radius = area / perimeter - radius[~bankfull_indices] = 1 - n_adj = ((self.channel_n * p_at_bkf) + ((perimeter - p_at_bkf) * self.floodplain_n)) / perimeter - discharge = (1 / n_adj) * area * (radius ** (2 / 3)) * (self.slope ** 0.5) - - geom['wetted_perimeter'][bankfull_indices] = perimeter[bankfull_indices] - geom['area'][bankfull_indices] = area[bankfull_indices] - 
geom['top_width'][bankfull_indices] = self.floodplain_width - geom['discharge'][bankfull_indices] = discharge[bankfull_indices] - geom['dpdy'][bankfull_indices] = 2 - - k_prime = (5 / 3) - ((2 / 3) * (geom['area'] / (geom['top_width'] * geom['wetted_perimeter'])) * geom['dpdy']) - geom['celerity'] = k_prime * (geom['discharge'] / geom['area']) - geom['celerity'][0] = geom['celerity'][1] + far_out_stages = np.exp(np.linspace(np.log(self.max_stage), np.log(10 * self.max_stage), 50)) # add some hail mary stages + geom['stage'] = np.append(geom['stage'], far_out_stages) + for param in ['top_width', 'area', 'wetted_perimeter', 'hydraulic_radius', 'mannings_n', 'discharge', 'celerity']: + geom[param] = np.zeros(geom['stage'].shape) + + mask = geom['stage'] > self.bfd + vec_geom = self.vectorized_get_geom_at_stage(geom['stage']) # Ck, WP, WPC, n, nCC, AREA, AREAC, R, So + geom['celerity'] = vec_geom[0] + geom['wetted_perimeter'] = vec_geom[1] + vec_geom[2] + geom['area'] = vec_geom[5] + vec_geom[6] + geom['hydraulic_radius'] = vec_geom[7] + geom['mannings_n'] = ((vec_geom[3] * vec_geom[1]) + (vec_geom[4] * vec_geom[2])) / (vec_geom[1] + vec_geom[2]) + Twl = self.Bw + 2.0*self.z*geom['stage'] + Twl[mask] = self.TwCC + geom['top_width'] = Twl + geom['discharge'] = ((1/geom['mannings_n']) * geom['area'] * (geom['hydraulic_radius']**(2./3.)) * np.sqrt(self.So)) geom['log_q'] = np.log(geom['discharge']) - geom['log_width'] = np.log(geom['top_width']) + np.any(np.isnan(geom['discharge'])) + + self.geometry = geom + + def vectorized_get_geom_at_stage(self, h_0): + Bw = self.Bw + bfd = self.bfd + z = self.z + TwCC = self.TwCC + So = self.So + n = self.n + nCC = self.nCC + mask = h_0 > bfd + + # get areas + AREA = (Bw + h_0 * z) * h_0 + AREA[mask] = (Bw + bfd * z) * bfd + AREAC = np.zeros_like(AREA) + AREAC[mask] = (TwCC * (h_0 - bfd))[mask] + + # get wetted perimeters + WP = (Bw + 2.0 * h_0 * np.sqrt(1.0 + z*z)) + WP[mask] = (Bw + 2.0 * bfd * np.sqrt(1.0 + z*z)) + WPC = np.zeros_like(WP) + WPC[mask] = (TwCC + (2.0 * (h_0 - bfd)))[mask] + + # get hydraulic radius + R = (AREA + AREAC) / (WP + WPC) + R[np.isnan(R)] = 0 + + # get overbank celerity + Ck = (np.sqrt(So)/n)*((5./3.)*R**(2./3.) - ((2./3.)*R**(5./3.)*(2.0*np.sqrt(1.0 + z*z)/(Bw+2.0*h_0*z)))) + tmp_a = AREA+AREAC + tmp_a[tmp_a < 0.001] = 0.001 + tmp_d = h_0 - bfd + tmp_d[tmp_d < 0] = 0 + Ck[mask] = (((np.sqrt(So)/n)*((5./3.)*R**(2./3.) - ((2./3.)*R**(5./3.)*(2.0*np.sqrt(1.0 + z*z)/(Bw+2.0*bfd*z))))*AREA + ((np.sqrt(So)/(nCC))*(5./3.)*(tmp_d)**(2./3.))*AREAC)/(tmp_a))[mask] + Ck = np.maximum(0.0, Ck) + + return Ck, WP, WPC, n, nCC, AREA, AREAC, R, So + + def get_geom_at_stage(self, h_0): + Bw = self.Bw + bfd = self.bfd + z = self.z + TwCC = self.TwCC + So = self.So + n = self.n + nCC = self.nCC + + WPC = 0 + AREAC = 0 + + if h_0 > bfd: + AREA = (Bw + bfd * z) * bfd + AREAC = TwCC * (h_0 - bfd) + WP = (Bw + 2.0 * bfd * np.sqrt(1.0 + z*z)) + WPC = TwCC + (2.0 * (h_0 - bfd)) + R = (AREA + AREAC) / (WP + WPC) + else: + AREA = (Bw + h_0 * z) * h_0 + WP = (Bw + 2.0 * h_0 * np.sqrt(1.0 + z*z)) + if WP > 0: + R = AREA / WP + else: + R = 0.0 + + if h_0 > bfd: + Ck = ((np.sqrt(So)/n)*((5./3.)*R**(2./3.) - ((2./3.)*R**(5./3.)*(2.0*np.sqrt(1.0 + z*z)/(Bw+2.0*bfd*z))))*AREA + ((np.sqrt(So)/(nCC))*(5./3.)*(h_0-bfd)**(2./3.))*AREAC)/(AREA+AREAC) + Ck = max(0.0, Ck) + else: + if h_0 > 0.0: + Ck = max(0.0,(np.sqrt(So)/n)*((5./3.)*R**(2./3.) 
- ((2./3.)*R**(5./3.)*(2.0*np.sqrt(1.0 + z*z)/(Bw+2.0*h_0*z))))) + else: + Ck = 0.0 + return Ck, WP, WPC, n, nCC, AREA, AREAC, R, So class SigmoidalReach(BaseReach): - geometry = {'stage': None, - 'top_width': None, - 'area': None, - 'wetted_perimeter': None, - 'hydraulic_radius': None, - 'mannings_n': None} - rating_curve = {'stage': None, - 'discharge': None} - muskingum_params = {'stage': None, - 'k': None, - 'x': None} def __init__(self, mannings_n, slope, reach_length, ch_w, fp_w, bkf_el, fp_s, max_stage=10, stage_resolution=50): self.mannings_n = mannings_n diff --git a/src/muskingumcunge/route.pyx b/src/muskingumcunge/route.pyx index 57699a9..9c1998b 100644 --- a/src/muskingumcunge/route.pyx +++ b/src/muskingumcunge/route.pyx @@ -2,14 +2,12 @@ import numpy as np import cython from libc.math cimport log, exp -def croute(py_inflows, dt, reach_length, slope, geometry, max_iter=1000): - inflows: cython.double[:] = py_inflows - outflows: cython.double[:] = np.empty_like(py_inflows) +def croute_fread(inflows: cython.double[:], outflows: cython.double[:], dt: cython.float, reach_length: cython.float, slope: cython.float, geometry, short_ts: cython.bint): geom_log_q: cython.double[:] = geometry['log_q'] geom_log_w: cython.double[:] = geometry['log_width'] geom_q: cython.double[:] = geometry['discharge'] geom_c: cython.double[:] = geometry['celerity'] - series_length: cython.int = len(py_inflows) + series_length: cython.int = len(inflows) i: cython.int q_guess: cython.float last_guess: cython.float @@ -23,41 +21,205 @@ def croute(py_inflows, dt, reach_length, slope, geometry, max_iter=1000): c0: cython.float c1: cython.float c2: cython.float - min_flow: cython.float = py_inflows.min() - max_iter: cython.int = max_iter - reach_length: cython.float = reach_length - dt: cython.float = dt - slope: cython.float = slope + min_flow: cython.float = np.min(inflows) + max_iter: cython.int = 1000 assert np.log(max(inflows)) < max(geom_log_q), 'Rating Curve does not cover range of flowrates in hydrograph' - - outflows[0] = inflows[0] for i in range(series_length - 1): - q_guess = (inflows[i] + inflows[i + 1] + outflows[i]) / 3 + if short_ts: + us_cur = inflows[i] + else: + us_cur = inflows[i + 1] + q_guess = (inflows[i] + us_cur + outflows[i]) / 3 last_guess = q_guess * 2 counter = 1 while abs(last_guess - q_guess) > 0.003: counter += 1 last_guess = q_guess - reach_q = (inflows[i] + inflows[i + 1] + outflows[i] + q_guess) / 4 + reach_q = (inflows[i] + us_cur + outflows[i] + q_guess) / 4 # Interpolate log_reach_q = log(reach_q) b_tmp = exp(np.interp(log_reach_q, geom_log_q, geom_log_w)) c_tmp = np.interp(log_reach_q, geom_log_q, geom_c) - courant = c_tmp * dt * 60 * 60 / reach_length + courant = c_tmp * dt / reach_length reynold = reach_q / (slope * c_tmp * reach_length * b_tmp) c0 = (-1 + courant + reynold) / (1 + courant + reynold) c1 = (1 + courant - reynold) / (1 + courant + reynold) c2 = (1 - courant + reynold) / (1 + courant + reynold) - q_guess = (c0 * inflows[i + 1]) + (c1 * inflows[i]) + (c2 * outflows[i]) + q_guess = (c0 * us_cur) + (c1 * inflows[i]) + (c2 * outflows[i]) q_guess = max(min_flow, q_guess) if counter == max_iter: last_guess = q_guess outflows[i + 1] = q_guess + return np.array(outflows) + +def interpolate(x: cython.double, xp: cython.double[:], yp: cython.double[:]): + i: cython.int + for i in range(len(xp) - 1): + if xp[i] <= x <= xp[i + 1]: + return yp[i] + ((x - xp[i]) * ((yp[i + 1] - yp[i]) / (xp[i + 1] - xp[i]))) + return 0.0 + +def croute_wrf( + inflows: cython.double[:], + 
outflows: cython.double[:], + lateral: cython.double[:], + depth: cython.double, + stages: cython.double[:], + cks: cython.double[:], + twls: cython.double[:], + qs: cython.double[:], + slope: cython.double, + dx: cython.double, + dt: cython.double, + short_ts: cython.bint): + + max_iter: cython.int = 100 + mindepth: cython.double = 0.01 + i: cython.int + + Qj_0: cython.double + Qj: cython.double + iters: cython.int + rerror: cython.double + aerror: cython.double + tries: cython.int + + h: cython.double + h_0: cython.double + qup: cython.double = 0 + quc: cython.double = 0 + qdp: cython.double = 0 + qdc: cython.double = 0 + depth: cython.double + Ck: cython.double + Twl: cython.double + q: cython.double + Km: cython.double + X: cython.double + D: cython.double + C1: cython.double = 0 + C2: cython.double = 0 + C3: cython.double = 0 + C4: cython.double = 0 + h_1: cython.double + + for i in range(len(inflows) - 1): + if not ((inflows[i] > 0.0) or (inflows[i + 1] > 0.0) or (outflows[i] > 0.0)): + depth = 0.0 + qdc = 0.0 + outflows[i + 1] = qdc + continue + Qj_0 = 0.0 + iters = 0 + rerror = 1.0 + aerror = 0.01 + tries = 0 + + h = (depth * 1.33) + mindepth + h_0 = (depth * 0.67) + + while rerror > 0.01 and aerror >= mindepth and iters <= max_iter: + qup = inflows[i] + if short_ts: + quc = inflows[i] + else: + quc = inflows[i + 1] + qdp = outflows[i] + + # Lower Interval + Ck = interpolate(h_0, stages, cks) + Twl = interpolate(h_0, stages, twls) + q = interpolate(h_0, stages, qs) + + if Ck > 0.000000: + Km = max(dt, dx / Ck) + else: + Km = dt + + if Ck > 0.0: + X = 0.5*(1-(Qj_0/(2.0*Twl*slope*Ck*dx))) + X = min(0.5, max(0.0, X)) + else: + X = 0.5 + + D = (Km*(1.000 - X) + dt/2.0000) + + C1 = (Km*X + dt/2.000000)/D + C2 = (dt/2.0000 - Km*X)/D + C3 = (Km*(1.00000000-X)-dt/2.000000)/D + C4 = (lateral[i]*dt)/D + Qj_0 = ((C1*qup) + (C2*quc) + (C3*qdp) + C4) - q + + # Upper Interval + Ck = interpolate(h, stages, cks) + Twl = interpolate(h, stages, twls) + q = interpolate(h, stages, qs) + + if Ck > 0.000000: + Km = max(dt, dx / Ck) + else: + Km = dt + + if Ck > 0.0: + X = 0.5*(1-(((C1*qup)+(C2*quc)+(C3*qdp) + C4)/(2.0*Twl*slope*Ck*dx))) + X = min(0.5, max(0.25, X)) + else: + X = 0.5 + + D = (Km*(1.000 - X) + dt/2.0000) + + C1 = (Km*X + dt/2.000000)/D + C2 = (dt/2.0000 - Km*X)/D + C3 = (Km*(1.00000000-X)-dt/2.000000)/D + C4 = (lateral[i]*dt)/D + Qj = ((C1*qup) + (C2*quc) + (C3*qdp) + C4) - q + + # Secant Method + if Qj - Qj_0 != 0: + h_1 = h - ((Qj * (h_0 - h))/(Qj_0 - Qj)) + if h_1 < 0.0: + h_1 = h + else: + h_1 = h + + if h > 0.0: + rerror = abs((h_1 - h) / h) + aerror = abs(h_1 - h) + else: + rerror = 0.0 + aerror = 0.9 + + h_0 = max(0.0, h) + h = max(0.0, h_1) + iters += 1 + + if h < mindepth: + break + + if iters >= max_iter: + tries += 1 + if tries <= 4: + h = h * 1.33 + h_0 = h_0 * 0.67 + max_iter += 25 + + if ((C1*qup)+(C2*quc)+(C3*qdp) + C4) < 0: + if C4 < 0 and abs(C4) > ((C1*qup)+(C2*quc)+(C3*qdp)): + qdc = 0.0 + else: + qdc = max(((C1*qup)+(C2*quc)+C4), ((C1*qup)+(C3*qdp)+C4)) + else: + qdc = ((C1*qup)+(C2*quc)+(C3*qdp) + C4) + if qdc < 0.0: + qdc = 0.0 + outflows[i + 1] = qdc + depth = h + return np.array(outflows) \ No newline at end of file
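
Usage sketch for the reworked routing API: route_hydrograph() now takes dt in seconds (callers above pass dt * 60 * 60 or 360) and selects a scheme by name ('fread', 'fread-c', 'wrf', or 'wrf-c', the compiled WRF-style default), with optional lateral inflows, an initial outflow, and a short_ts flag. A minimal sketch following samples/simple_routing/simple_routing.py; the triangular inflow series below is hypothetical, and the reach parameters are simply those used in that sample:

    import numpy as np
    from muskingumcunge.reach import TrapezoidalReach

    # hypothetical triangular inflow hydrograph (cms) on a 360 s timestep
    inflows = np.interp(np.arange(200), [0, 50, 199], [10.0, 100.0, 10.0])

    # reach constructed with the same arguments as the sample script
    reach = TrapezoidalReach(100, 100, 0.1, 0.001, 2000)

    # dt is passed in seconds; solver selects the routing scheme
    outflows = reach.route_hydrograph(inflows, 360, solver='fread')
    # outflows = reach.route_hydrograph(inflows, 360, solver='wrf-c')  # Cython WRF-style solver (default)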
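
Basin runs are driven from a JSON run-configuration file; a possible workflow sketch using muskingumcunge.controller, assuming the package is importable (the directory path below is hypothetical):

    from muskingumcunge.controller import create_project, handler

    cfg = '/tmp/example_basin/mc_run.json'   # hypothetical location
    create_project(cfg)                      # writes a JSON template (mode='basin', dt='5m', ...)
                                             # and creates an 'outputs' directory next to it
    # edit the template so meta_path, lateral_path, inflow_path, init_conditions_path,
    # and geometry_dir point at real inputs, then run the model:
    handler(cfg)                             # dispatches to execute() for 'basin' mode
                                             # or execute_by_reach() for 'reach' mode

The controller module also accepts the configuration path as sys.argv[1] when invoked as a script, matching the pattern now used by samples/CIROH/muskingum_cunge.py, which takes the run-metadata JSON path as its first command-line argument.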