diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index feb11061abad026daee487fbea4676b6f96ed355..7850d83d71c461226c34175a41ab44fc1aad2b75 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,7 +33,7 @@ stages: .before_script: before_script: - - BRANCH="main" + - BRANCH="lb584_060624" - echo "branch - " $BRANCH - PACKAGES_DIR="$CI_PROJECT_DIR/packages/" - echo "project dir - " $CI_PROJECT_DIR diff --git a/ews/coordinator/processor_advisory.py b/ews/coordinator/processor_advisory.py index c6fcd9b8ad836c05673db36ff4a49e2189619e62..c2e35f290ae090ff64db1cb57bbf12b548d026f1 100644 --- a/ews/coordinator/processor_advisory.py +++ b/ews/coordinator/processor_advisory.py @@ -37,7 +37,7 @@ class ProcessorAdvisory(ProcessorBase): who edits the content of the document. Uses the gitlab project EWS-advisory-builder.''' - config_advisory = config[component].copy() + config_advisory = config.copy() config_advisory['jobPath'] = jobPath diff --git a/ews/coordinator/processor_base.py b/ews/coordinator/processor_base.py index cb558f308f6911b5de3efccb78a114b6c16434d5..59a28ece82596f9d682e2cf3b633487da7e25853 100755 --- a/ews/coordinator/processor_base.py +++ b/ews/coordinator/processor_base.py @@ -21,8 +21,8 @@ from typing import List from ews.coordinator.utils import processor_utils from ews.coordinator.utils.jobstatus import Jobstatus -from ews.coordinator.utils.processor_utils import open_and_check_config, end_script, end_job, append_item_to_list, \ - clear_up +from ews.coordinator.utils.processor_utils import end_script, end_job, append_item_to_list, \ + clear_up, parse_json_file_with_tokens print("Make sure to `conda activate py3EWSepi` environment!") @@ -62,14 +62,14 @@ class ProcessorBase: # positional arguments do not start with - or -- and are always required # optional arguments start with - or -- and default is required = False - # my_parser.add_argument( - # '-p', '--component', - # type = str, - # choices = list(short_name.keys()), - # required = False, - # dest = 'component', - # help = '''Name of EWS component to process, which must be present - # in the config file.''') + my_parser.add_argument( + '-sc', '--sys_config', + metavar = 'path', + type = str, + dest = 'sys_config_path', + required = True, + help = '''path to the system config file. This file contains values which are common to all Processor + components''') my_parser.add_argument( '-c', '--config', @@ -78,9 +78,6 @@ class ProcessorBase: nargs = '+', # require at least one path dest = 'config_paths', required = True, - # default = ['config_Ethiopia_template_stripe.json'], # remove once live - # default = ['config_Bangladesh_template_stripe.json'], # remove once live - # default = ['config_Nepal_template_stripe.json'], # remove once live help = '''path to a config file(s). More than one can be provided, in which case each is worked on in turn (e.g. one each for stripe, stem, leaf). Do not place other options between these.''') @@ -154,50 +151,47 @@ class ProcessorBase: return dictionary - def build_universal_config(self, configs: list, component: str, universal_config = None) -> dict: - '''This config obtains aspects of each dict in configs that must be common to - them all. 
''' - - # initialise universal config - if not universal_config: - universal_config = { - 'WorkspacePathout': set() - } - - keys = universal_config.keys() - - # get value of each key from each config file - for configFile in configs: - - try: - - config_i = open_and_check_config(configFile) - - except: - - logger.exception(f"Failure in opening or checking config {configFile}") - end_script(premature = True) - - for key in keys: - - try: - universal_config[key].add(config_i[key]) - - except KeyError: - - # key must be in component sub-dict - universal_config[key].add(config_i[component][key]) - - # Check for and keep only one value per key - for key in keys: - - if len(universal_config[key]) > 1: - logger.error(f"Config files point to multiple {key} but this script can only handle one.") - end_script(premature = True) - - universal_config[key] = universal_config[key].pop() - - return universal_config + # def build_sys_config(self, sys_config_path: str) -> dict: + # '''This config obtains aspects of each dict in configs that must be common to + # them all. ''' + # + # # initialise universal config + # # sys_config = { + # # "ProjectRoot": set(), + # # 'WorkspacePathout': set(), + # # "MetoFTP": set(), + # # "WorkspacePath": set(), + # # "ResourcesPath": set() + # # } + # + # sys_config = {} + # # keys = sys_config.keys() + # + # # get value of each key from each config file + # try: + # sys_config = open_and_check_config(sys_config_path) + # # for key in keys: + # # if key in config_i.keys(): + # # sys_config[key].add(config_i[key]) + # except: + # logger.exception(f"Failure in opening or checking config {sys_config_path}") + # end_script(premature = True) + # + # # Check for and keep only one value per key + # # for key in keys: + # # if len(sys_config[key]) > 1: + # # logger.error(f"Config files point to multiple {key} but this script can only handle one.") + # # end_script(premature = True) + # # + # # sys_config[key] = sys_config[key].pop() + # + # # config_tokens = {'ProjectRoot': sys_config['ProjectRoot'], + # # 'WorkspacePathout': sys_config['WorkspacePathout'], + # # 'MetoFTP': sys_config['MetoFTP'], + # # 'WorkspacePath': sys_config['WorkspacePath'], + # # 'ResourcesPath': sys_config['ResourcesPath']} + # + # return sys_config def prepare_job_directory(self, job_path): """ @@ -221,6 +215,7 @@ class ProcessorBase: # check initial state of each config file, and gather terms that must apply # across all provided configs + sys_config_path: str = args['sys_config_path'] config_paths: List[str] = args['config_paths'] component: str = args['component'] short_name: str = args['short_name'] @@ -228,12 +223,12 @@ class ProcessorBase: clearup: bool = args['clearup'] # load universal configuration - universal_config = self.build_universal_config(config_paths, component) - universal_config['StartString'] = start_date + sys_config = parse_json_file_with_tokens(sys_config_path) + sys_config['StartString'] = start_date # determine job directory - workspacePath = universal_config['WorkspacePathout'] - job_path: str = f'{workspacePath}{short_name}_{start_date}' + workspace_path = sys_config['WorkspacePathout'] + job_path: str = f'{workspace_path}{short_name}_{start_date}' self.prepare_job_directory(job_path) is_live: bool = args["live"] @@ -246,9 +241,9 @@ class ProcessorBase: logger.info("==========") logger.info(f"Logging started at {datetime.datetime.now().strftime('%Y %b %d %H:%M:%S')}") - logger.info(f"Universal config is\n{json.dumps(universal_config, indent = 2)}") + logger.info(f"Universal 
config is\n{json.dumps(sys_config, indent = 2)}") logger.info(f"Job path will be {job_path}") - workspacePath = universal_config['WorkspacePathout'] + workspace_path = sys_config['WorkspacePathout'] # note that file date represents preceding 3 hours, so day's data starts at file timestamp 0300 UTC start_time: datetime = datetime.datetime.strptime(start_date + '03', '%Y%m%d%H') @@ -290,7 +285,14 @@ class ProcessorBase: logger.info(f'Working on config {configIndex + 1} of {config_paths_length}') try: - configjson: dict = open_and_check_config(configtemplate) + configjson: dict = parse_json_file_with_tokens(configtemplate, sys_config) + # then add the sys_config keys and values to the configjson + for k, v in sys_config.items(): + if k not in configjson.keys(): + configjson[k] = v + else: + raise Exception(f"Config key {k} is already defined in the component config; refusing to overwrite it with the sys_config value") + except: logger.exception(f"Failure in opening or checking config {configtemplate}") # TODO: This case should test flagdir.jobStatus.__exit__() @@ -303,12 +305,12 @@ class ProcessorBase: # from configtemplate create configFileName to describe the specific job component: str = component - configFileName = f"{os.path.basename(configtemplate).replace('.json', '')}_{component}" + config_file_name = f"{os.path.basename(configtemplate).replace('.json', '')}_{component}" - configjson['ConfigFilePath'] = configFileName + configjson['ConfigFilePath'] = config_file_name # write the complete configuration file to job directory - with open(f"{job_path}/{configFileName}.json", 'w') as write_file: + with open(f"{job_path}/{config_file_name}.json", 'w') as write_file: json.dump(configjson, write_file, indent = 4) # proc_description = universal_config['ProcessInJob'] @@ -353,7 +355,7 @@ class ProcessorBase: if status.is_success() & (clearup is True): logger.info('Clearing up') - clearup_dest_dir = f"{workspacePath}/clearup/{short_name}_{start_date}/" + clearup_dest_dir = f"{workspace_path}/clearup/{short_name}_{start_date}/" Path(clearup_dest_dir).mkdir(parents = True, exist_ok = True) logger.info(f"While developing, moving directories to this directory : {clearup_dest_dir}") diff --git a/ews/coordinator/processor_deposition.py b/ews/coordinator/processor_deposition.py index 1d0be302586717d5986b748057bd015b126f00d0..93e30c7ad3ec9153a7ad33f3e0ce7e5031a3325a 100644 --- a/ews/coordinator/processor_deposition.py +++ b/ews/coordinator/processor_deposition.py @@ -44,8 +44,8 @@ class ProcessorDeposition(ProcessorBase): def process_in_job_dep(self, jobPath, status, config, component): logger.info('started process_in_job_dep()') - file_path = Template(config[component]['ServerPathTemplate']).substitute(**config) - file_name = Template(config[component]['InputFileTemplate']).substitute(**config) + file_path = Template(config['ServerPathTemplate']).substitute(**config) + file_name = Template(config['InputFileTemplate']).substitute(**config) logger.info(f"Expecting to work with {file_name}") @@ -97,7 +97,7 @@ class ProcessorDeposition(ProcessorBase): return proc_out - def process_EWS_plotting_dep(self, jobPath, config) -> [str]: + def process_EWS_plotting_dep(self, job_path, config) -> [str]: '''Returns a list of output files for transfer.''' logger.info('started process_EWS_plotting_dep()') @@ -105,29 +105,29 @@ class ProcessorDeposition(ProcessorBase): # initialise environment regions = config['SubRegionNames'] - deposition_file_name = Template(config['Deposition']['InputFileTemplate']).substitute(**config) + deposition_file_name = 
Template(config['InputFileTemplate']).substitute(**config) - deposition_path = f"{jobPath}/{deposition_file_name}" + deposition_path = f"{job_path}/{deposition_file_name}" # get the file name from the config # this file name can be a glob, as long as matches can all be loaded by iris - deposition_data_file_name = Template(config['Deposition']['DataFileTemplate']).substitute(**config) + deposition_data_file_name = Template(config['DataFileTemplate']).substitute(**config) name_file_wildcard = f"{deposition_path}/{deposition_data_file_name}" ews_plotting_output_globs = [] for region in regions: - output_dir = f"{jobPath}/plotting/{region.lower()}" + output_dir = f"{job_path}/plotting/{region.lower()}" Path(output_dir).mkdir(parents=True, exist_ok=True) - sys_config = config['Deposition']['EWS-Plotting']['SysConfig'] - name_extraction_config = config['Deposition']['EWS-Plotting']['NameExtractionConfig'] - run_config = config['Deposition']['EWS-Plotting']['RunConfig'] - run_config_norm = config['Deposition']['EWS-Plotting']['RunConfigNorm'] - chart_config = config['Deposition']['EWS-Plotting'][region]['ChartConfig'] - normalize = config['Deposition']['EWS-Plotting'][region]['Normalize'] + sys_config = config['EWS-Plotting']['SysConfig'] + name_extraction_config = config['EWS-Plotting']['NameExtractionConfig'] + run_config = config['EWS-Plotting']['RunConfig'] + run_config_norm = config['EWS-Plotting']['RunConfigNorm'] + chart_config = config['EWS-Plotting'][region]['ChartConfig'] + normalize = config['EWS-Plotting'][region]['Normalize'] extraction_file_prefix = 'deposition_' + region.lower() # Note that this runs all disease types available diff --git a/ews/coordinator/processor_environment.py b/ews/coordinator/processor_environment.py index de5d90b5c6130252a0c150210bf8677770240326..99f93bd82a8b3cf20e6e1896f46c5456fa824690 100644 --- a/ews/coordinator/processor_environment.py +++ b/ews/coordinator/processor_environment.py @@ -60,10 +60,10 @@ class ProcessorEnvironment(ProcessorBase): region = config['RegionName'] - pipeline_config = config["Environment"] + # pipeline_config = config["Environment"] try: logger.info(f"Calling environmental suitability 2.0 for {region}") - self.run_pipeline(pipeline_config, region, config["StartString"]) + self.run_pipeline(config, region, config["StartString"]) except: logger.exception(f"Some failure when running EnvSuitPipeline.py") raise @@ -316,10 +316,10 @@ class ProcessorEnvironment(ProcessorBase): Path(output_dir).mkdir(parents=True, exist_ok=True) - sys_config = config['Environment']['EWS-Plotting']['SysConfig'] - run_config = config['Environment']['EWS-Plotting']['RunConfig'] - chart_config = config['Environment']['EWS-Plotting'][region]['ChartConfig'] - filter_for_country = config['Environment']['EWS-Plotting'][region]['FilterForCountry'] + sys_config = config['EWS-Plotting']['SysConfig'] + run_config = config['EWS-Plotting']['RunConfig'] + chart_config = config['EWS-Plotting'][region]['ChartConfig'] + filter_for_country = config['EWS-Plotting'][region]['FilterForCountry'] # Note that this runs all disease types available diff --git a/ews/coordinator/processor_epidemiology.py b/ews/coordinator/processor_epidemiology.py index 9dccc3374db8d91cbea7e377f1de37175836ba27..b9380fdcc946bcd699487978fab8643c4c8100cc 100644 --- a/ews/coordinator/processor_epidemiology.py +++ b/ews/coordinator/processor_epidemiology.py @@ -27,10 +27,9 @@ from ews.postprocessing.epi.epi_post_processor import EPIPostPostProcessor from ews.coordinator.utils.processor_utils import ( 
calc_epi_date_range, - open_and_check_config, get_only_existing_globs, end_job, - disease_latin_name_dict, short_name + disease_latin_name_dict, short_name, parse_json_file_with_tokens ) logger = logging.getLogger(__name__) @@ -66,11 +65,11 @@ class ProcessorEpidemiology(ProcessorBase): for configFile in config_fns: # they should be working if the script made it this far, no need to try - config_i = open_and_check_config(configFile) + config_i = parse_json_file_with_tokens(configFile) #determine end time, from config file arg_start_date: str = input_args['start_date'] - calc_span_days = config_i['Epidemiology']['CalculationSpanDays'] + calc_span_days = config_i['CalculationSpanDays'] assert len(calc_span_days) == 2 start_time, end_time = calc_epi_date_range(arg_start_date,calc_span_days) @@ -90,7 +89,7 @@ class ProcessorEpidemiology(ProcessorBase): # create a string describing every epi calc configuration epiStrings = [] - for epiconf in config['Epidemiology']['Epi']: + for epiconf in config['Epi']: epiKwargsString = ''.join([f"{k}{v}" for k,v in epiconf['modelArguments'].items()]) # drop any repetitive elements of kwarg @@ -282,22 +281,22 @@ class ProcessorEpidemiology(ProcessorBase): # start date to reference date defines the range of the job that is being # continued. - # - Each model named under config['Epidemiology']['Epi'] will need a + # - Each model named under config['Epi'] will need a # corresponding set of arguments for reading in as for depo and env, e.g. - # config['Epidemiology']['model_name']. + # config['model_name']. # - depo and env file listers will be applied only for the reference date # to end date, so choose an appropriate lister, e.g. # list_onefile_operational - is_continue = config['Epidemiology'].get('continue',False) + is_continue = config.get('continue',False) # initialise any needed variables reference_date_str = config['StartString'] reference_date = datetime.datetime.strptime(reference_date_str,'%Y%m%d') - start_date, end_date = calc_epi_date_range(reference_date_str,config['Epidemiology']['CalculationSpanDays']) + start_date, end_date = calc_epi_date_range(reference_date_str,config['CalculationSpanDays']) start_string = start_date.strftime('%Y-%m-%d-%H%M') start_string_short = start_date.strftime('%Y%m%d%H%M') @@ -313,10 +312,10 @@ class ProcessorEpidemiology(ProcessorBase): yesterday_date = datetime.datetime.strptime(reference_date_str,'%Y%m%d') - datetime.timedelta(days=1) yesterday_string = yesterday_date.strftime('%Y%m%d') - diseases = config['Epidemiology']['DiseaseNames'] + diseases = config['DiseaseNames'] # get list of variable names to be loaded from deposition input - depo_variable_names = config['Epidemiology']['Deposition']['VariableNames'] + depo_variable_names = config['Deposition']['VariableNames'] assert len(depo_variable_names) == len(diseases) # loop over each sub region @@ -331,7 +330,7 @@ class ProcessorEpidemiology(ProcessorBase): config['SubRegionName'] = region config['DiseaseName'] = disease - config_epi = config['Epidemiology'].copy() + config_epi = config.copy() # TODO: CAUTION: Any iterations (e.g. disease or sub-region) are hidden # in jobPath, and not retained in the config file. 
This is a problem for @@ -665,31 +664,31 @@ class ProcessorEpidemiology(ProcessorBase): # initialise necessary variables from config - start_date, end_date = calc_epi_date_range(config['StartString'],config['Epidemiology']['CalculationSpanDays']) + start_date, end_date = calc_epi_date_range(config['StartString'],config['CalculationSpanDays']) start_string = start_date.strftime('%Y%m%d') end_string = end_date.strftime('%Y%m%d') - epi_case_operational = config['Epidemiology']['EWS-Plotting']['EpiCase'] + epi_case_operational = config['EWS-Plotting']['EpiCase'] if epi_case_operational == 'none': logger.info('Config specifies not to call to EWS-Plotting') return [] - diseases = config['Epidemiology']['DiseaseNames'] + diseases = config['DiseaseNames'] # initialise environment - sys_config = config['Epidemiology']['EWS-Plotting']['SysConfig'] + sys_config = config['EWS-Plotting']['SysConfig'] - chart_config = config['Epidemiology']['EWS-Plotting']['ChartConfig'] + chart_config = config['EWS-Plotting']['ChartConfig'] # use the first matching epi formulation # TODO: Is there a more efficient way to select? - epi_filename = [ce['infectionRasterFileName'] for ce in config['Epidemiology']['Epi'] if ce['model']==epi_case_operational][0] + epi_filename = [ce['infectionRasterFileName'] for ce in config['Epi'] if ce['model']==epi_case_operational][0] dep_regionnames = ['SouthAsia','Ethiopia'] - # TODO get deposition_dir from config['Epidemiology']['Deposition']['PathTemplate'] + # TODO get deposition_dir from config['Deposition']['PathTemplate'] dep_regionname = 'Ethiopia' #SouthAsia deposition_dir = f"{config['WorkspacePath']}DEPOSITION_{start_string}/WR_NAME_{dep_regionname}_{start_string}/" @@ -707,7 +706,7 @@ class ProcessorEpidemiology(ProcessorBase): disease_short = disease.lower().replace('rust','') # a fudge, guess disease type - # because config['Epidemiology']['ProcessInJob'] handles disease loop internally + # because config['ProcessInJob'] handles disease loop internally # assumes disease name is the last directory before the filename # TODO: handle multiple diseases and regions in Processor as a loop, or in the config disease_to_drop = os.path.dirname(epi_filename).split('/')[-1].replace('Rust','') @@ -715,16 +714,16 @@ class ProcessorEpidemiology(ProcessorBase): epi_filename = epi_filename.replace(disease_to_drop,disease_to_add) map_title = "Integrated prediction of Wheat $\\bf{" + disease_to_add + "}$ Rust infection" - if 'PlottingRegionName' not in config['Epidemiology']['EWS-Plotting']: + if 'PlottingRegionName' not in config['EWS-Plotting']: plotting_region_name_lower = config['RegionName'].lower() else: - plotting_region_name_lower = config['Epidemiology']['EWS-Plotting']['PlottingRegionName'].lower() + plotting_region_name_lower = config['EWS-Plotting']['PlottingRegionName'].lower() epi_seasonsofar_fn = epi_filename+'_per_ha_wheat_seasonsofar.csv' epi_seasonincforecast_fn = epi_filename+'_per_ha_wheat.csv' - seasonsofar_run_config = config['Epidemiology']['EWS-Plotting'].get('RunConfig_seasonsofar',None) + seasonsofar_run_config = config['EWS-Plotting'].get('RunConfig_seasonsofar',None) # only run season so far (i.e. 
historic dates) if they exist if (seasonsofar_run_config is not None) & os.path.exists(epi_seasonsofar_fn): @@ -747,7 +746,7 @@ class ProcessorEpidemiology(ProcessorBase): # prepare command for seasonplusforecast - run_config = config['Epidemiology']['EWS-Plotting']['RunConfig_seasonplusforecast'] + run_config = config['EWS-Plotting']['RunConfig_seasonplusforecast'] logger.info(f"Running EWS-Plotting with the following configs:\n{sys_config}\n{run_config}\n{chart_config}") diff --git a/ews/coordinator/processor_surveys.py b/ews/coordinator/processor_surveys.py index dbc667a7cb796407ec88df6263aeb49d9b50f804..8bc4274a7ec785ba0493b18f81333c4d3046651b 100644 --- a/ews/coordinator/processor_surveys.py +++ b/ews/coordinator/processor_surveys.py @@ -83,7 +83,7 @@ class ProcessorSurveys(ProcessorBase): logger.debug('Performing download(s) from ODK server') - credentials_filename = config['Survey']['ServerCredentialsFile'] + credentials_filename = config['ServerCredentialsFile'] with open(credentials_filename) as credentials_file: cred: dict = json.load(credentials_file) @@ -148,9 +148,9 @@ class ProcessorSurveys(ProcessorBase): # simple format alignment using edits on config # (should this need to be much more sophisticated, reconsider the workflow) - if 'FormEdits' in config['Survey']: + if 'FormEdits' in config: - form_edits = config['Survey']['FormEdits'] + form_edits = config['FormEdits'] # loop over each form for form_name, edits in form_edits.items(): @@ -264,7 +264,7 @@ class ProcessorSurveys(ProcessorBase): upload_directory = f"{jobPath}/upload" Path(upload_directory).mkdir(parents=True, exist_ok=True) - if 'Groups' in config['Survey']: + if 'Groups' in config: # if 'Groups' is defined in the config, create grouped survey files and run python version logger.info('Preparing grouped survey files') @@ -272,12 +272,12 @@ class ProcessorSurveys(ProcessorBase): Path(group_directory).mkdir(parents=True, exist_ok=True) # creating initial groups - groups = config['Survey']['Groups'] + groups = config['Groups'] # check if columns requested in GroupBy are present in the dataframe - assert all([group_by in df_join.columns for group_by in config['Survey']['GroupBy']]), 'Column(s) requested in GroupBy are not present in the dataframe' + assert all([group_by in df_join.columns for group_by in config['GroupBy']]), 'Column(s) requested in GroupBy are not present in the dataframe' - for group_by in config['Survey']['GroupBy']: + for group_by in config['GroupBy']: logger.debug(f"grouping by {group_by}") # handle NaN values @@ -294,8 +294,8 @@ class ProcessorSurveys(ProcessorBase): groups.update({i:{group_by:[i]} for i in groups_list}) # remove groups that are listed in GroupsToIgnore - if 'GroupsToIgnore' in config['Survey']: - groups_to_ignore = config['Survey']['GroupsToIgnore'] + if 'GroupsToIgnore' in config: + groups_to_ignore = config['GroupsToIgnore'] # add groups to ignore if all elements of the group are in the list of groups to ignore for group_name in groups: @@ -327,18 +327,18 @@ class ProcessorSurveys(ProcessorBase): output_directory = f"{jobPath}/source_gen/{group_name}" Path(output_directory).mkdir(parents=True, exist_ok=True) - if 'SourcesConfigs' in config['Survey'] and group_name in config['Survey']['SourcesConfigs']: - logger.info(f"Running source gen for {group_name} group wih config {config['Survey']['SourcesConfigs'][group_name]}") - sources_config = config['Survey']['SourcesConfigs'][group_name] + if 'SourcesConfigs' in config and group_name in config['SourcesConfigs']: + 
logger.info(f"Running source gen for {group_name} group wih config {config['SourcesConfigs'][group_name]}") + sources_config = config['SourcesConfigs'][group_name] else: - logger.info(f"Running source gen for {group_name} group wih default config {config['Survey']['SourcesConfigDefault']}") - sources_config = config['Survey']['SourcesConfigDefault'] + logger.info(f"Running source gen for {group_name} group wih default config {config['SourcesConfigDefault']}") + sources_config = config['SourcesConfigDefault'] sources_path = run_case( config_path = sources_config, survey_path = group_surveys_filepath, - survey_format = config['Survey']['SurveyFormat'], - surveyor_name_col = config['Survey']['SurveyorNameCol'], + survey_format = config['SurveyFormat'], + surveyor_name_col = config['SurveyorNameCol'], reference_date = config['StartString'], # Day 0 (current day) is always included # Days -2,-1 and 0 are needed to initialise spores in NAME @@ -374,10 +374,10 @@ class ProcessorSurveys(ProcessorBase): Path(output_directory).mkdir(parents=True, exist_ok=True) sources_path = run_case( - config_path = config['Survey']['SourcesConfigDefault'], + config_path = config['SourcesConfigDefault'], survey_path = processed_surveys_filepath, - survey_format = config['Survey']['SurveyFormat'], - surveyor_name_col = config['Survey']['SurveyorNameCol'], + survey_format = config['SurveyFormat'], + surveyor_name_col = config['SurveyorNameCol'], reference_date = config['StartString'], # Day 0 (current day) is always included # Days -2,-1 and 0 are needed to initialise spores in NAME diff --git a/ews/coordinator/survey_servers/processor_surveys_kobotoolbox.py b/ews/coordinator/survey_servers/processor_surveys_kobotoolbox.py index b9f96c58644665765a41fbe1b66044232dd18bc9..2fb8db61e91e81537f0902f36fe991c184813224 100644 --- a/ews/coordinator/survey_servers/processor_surveys_kobotoolbox.py +++ b/ews/coordinator/survey_servers/processor_surveys_kobotoolbox.py @@ -145,7 +145,7 @@ def get_kobotoolbox_form_as_csv(form_credentials: dict, jobPath: str, config: di download_success = True - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) if not skip_download: try: @@ -189,7 +189,7 @@ def get_kobotoolbox_form_as_csv(form_credentials: dict, jobPath: str, config: di copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_new_odk.py b/ews/coordinator/survey_servers/processor_surveys_new_odk.py index 57468437f7791b938814c8353265469e94767227..22f2804f11d00e6f581510c713959cdc52700626 100644 --- a/ews/coordinator/survey_servers/processor_surveys_new_odk.py +++ b/ews/coordinator/survey_servers/processor_surveys_new_odk.py @@ -171,7 +171,7 @@ def get_newODK_form_as_csv(form_credentials: dict, jobPath: str, config: dict, s download_success = True - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) if not skip_download: try: @@ -215,7 +215,7 @@ def get_newODK_form_as_csv(form_credentials: dict, jobPath: str, config: dict, s copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + 
acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_new_odk2.py b/ews/coordinator/survey_servers/processor_surveys_new_odk2.py index c5b6ea42016f695a2f204cba2998476eb74d3602..3a0f40be20cc8ac42a4ea91f5f95f39dc555fe10 100644 --- a/ews/coordinator/survey_servers/processor_surveys_new_odk2.py +++ b/ews/coordinator/survey_servers/processor_surveys_new_odk2.py @@ -172,7 +172,7 @@ def get_newODK2_form_as_csv(form_credentials: dict, jobPath: str, config: dict, download_success = True - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) if not skip_download: try: @@ -229,7 +229,7 @@ def get_newODK2_form_as_csv(form_credentials: dict, jobPath: str, config: dict, copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_odk.py b/ews/coordinator/survey_servers/processor_surveys_odk.py index fcfe93ed3c194aafbd0a5a5862b69f0f6827712d..c39687201aa9eb4a053f50bc89829824937b7b41 100644 --- a/ews/coordinator/survey_servers/processor_surveys_odk.py +++ b/ews/coordinator/survey_servers/processor_surveys_odk.py @@ -21,7 +21,7 @@ def get_ODK_form_as_csv(form_credentials: dict, jobPath: str, config: dict, stat '''Given a dict with a single ODK form to download from an ODK Aggregate server, obtains it and converts to csv.''' - ODK_output_path_template = config['Survey'].get('ODKDatabasePathTemplate','${WorkspacePathout}/ODK_DB/') + ODK_output_path_template = config.get('ODKDatabasePathTemplate','${WorkspacePathout}/ODK_DB/') ODK_output_path = Template(ODK_output_path_template).substitute(**config) # get data from ODK server @@ -32,7 +32,7 @@ def get_ODK_form_as_csv(form_credentials: dict, jobPath: str, config: dict, stat ODK_jar = form_credentials['ODK_jar'] assert os.path.exists(ODK_jar) - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) ODK_download_success = True @@ -95,7 +95,7 @@ def get_ODK_form_as_csv(form_credentials: dict, jobPath: str, config: dict, stat ODK_copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not ODK_copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_odk_sa.py b/ews/coordinator/survey_servers/processor_surveys_odk_sa.py index 1e035af2ceed69e164bca59929dbdee191006fe6..1b93de1f6cc939877ecbac2c9d47f30b0995be12 100644 --- a/ews/coordinator/survey_servers/processor_surveys_odk_sa.py +++ b/ews/coordinator/survey_servers/processor_surveys_odk_sa.py @@ -101,7 +101,7 @@ def get_ODK_SA_form_as_csv(form_credentials: dict, jobPath: str, config: dict, s '''Given a dict with a single ODK form to download from an ODK Aggregate server with South Asia formatting, obtains it and converts to csv.''' - ODK_output_path_template = 
config['Survey'].get('ODKDatabasePathTemplate','${WorkspacePathout}/ODK_DB/') + ODK_output_path_template = config.get('ODKDatabasePathTemplate','${WorkspacePathout}/ODK_DB/') ODK_output_path = Template(ODK_output_path_template).substitute(**config) # get data from ODK server @@ -112,7 +112,7 @@ def get_ODK_SA_form_as_csv(form_credentials: dict, jobPath: str, config: dict, s ODK_jar = form_credentials['ODK_jar'] assert os.path.exists(ODK_jar) - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) ODK_download_success = True @@ -207,7 +207,7 @@ def get_ODK_SA_form_as_csv(form_credentials: dict, jobPath: str, config: dict, s ODK_copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not ODK_copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_odk_v2.py b/ews/coordinator/survey_servers/processor_surveys_odk_v2.py index 8c7fd3b21d18616938ae78273631997cfa1a268c..031eac46d8597f88d76df7789dc5572897e9377a 100644 --- a/ews/coordinator/survey_servers/processor_surveys_odk_v2.py +++ b/ews/coordinator/survey_servers/processor_surveys_odk_v2.py @@ -335,7 +335,7 @@ def get_ODKv2_form_as_csv(form_credentials: dict, jobPath: str, config: dict, st download_success = True - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) if not skip_download: try: @@ -386,7 +386,7 @@ def get_ODKv2_form_as_csv(form_credentials: dict, jobPath: str, config: dict, st copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_wrsis.py b/ews/coordinator/survey_servers/processor_surveys_wrsis.py index 9411ac26b57ff419f2e3cdeb2d607e9fe8257179..7d335d3369dad3d573f76487b881dbffb2210110 100644 --- a/ews/coordinator/survey_servers/processor_surveys_wrsis.py +++ b/ews/coordinator/survey_servers/processor_surveys_wrsis.py @@ -263,12 +263,12 @@ def get_WRSIS_form_as_csv(form_credentials: dict, jobPath: str, config: dict, st download_success = True - start_date = datetime.datetime.strptime(config['Survey']['SeasonStartString'],'%Y%m%d').strftime('%d-%m-%Y') + start_date = datetime.datetime.strptime(config['SeasonStartString'],'%Y%m%d').strftime('%d-%m-%Y') end_date = datetime.datetime.strptime(config['StartString'], '%Y%m%d').strftime('%d-%m-%Y') logger.debug(f'Performing download from WRSIS between {start_date} and {end_date}') - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) if not skip_download: try: @@ -319,7 +319,7 @@ def get_WRSIS_form_as_csv(form_credentials: dict, jobPath: str, config: dict, st copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not copy_success) and (days_back <= 
acceptable_days_back)): diff --git a/ews/coordinator/survey_servers/processor_surveys_wrt.py b/ews/coordinator/survey_servers/processor_surveys_wrt.py index 13f7f3e1f74ec6d59f490fd2d7a168a9ec7aaffc..813056934f2df8d812f55915b31e22b00d799a22 100644 --- a/ews/coordinator/survey_servers/processor_surveys_wrt.py +++ b/ews/coordinator/survey_servers/processor_surveys_wrt.py @@ -240,12 +240,12 @@ def get_WRT_form_as_csv(form_credentials: dict, jobPath: str, config: dict, stat download_success = True - start_date = datetime.datetime.strptime(config['Survey']['SeasonStartString'],'%Y%m%d').strftime('%Y-%m-%d') + start_date = datetime.datetime.strptime(config['SeasonStartString'],'%Y%m%d').strftime('%Y-%m-%d') end_date = datetime.datetime.strptime(config['StartString'], '%Y%m%d').strftime('%Y-%m-%d') logger.debug(f'Performing download from WRT between {start_date} and {end_date}') - skip_download: bool = config['Survey'].get('SkipServerDownload', False) + skip_download: bool = config.get('SkipServerDownload', False) if not skip_download: try: @@ -300,7 +300,7 @@ def get_WRT_form_as_csv(form_credentials: dict, jobPath: str, config: dict, stat copy_success = False days_back = 1 - acceptable_days_back = int(config['Survey']['AcceptableDowntimeDays']) + acceptable_days_back = int(config['AcceptableDowntimeDays']) logger.debug(f"Acceptable server downtime is set to {acceptable_days_back} days") while ((not copy_success) and (days_back <= acceptable_days_back)): diff --git a/ews/coordinator/utils/processor_utils.py b/ews/coordinator/utils/processor_utils.py index 235843cab65c7b324f9820b8f85c6378d77a5904..5f488d50e379168d73a7cb869ae5048ccde82267 100644 --- a/ews/coordinator/utils/processor_utils.py +++ b/ews/coordinator/utils/processor_utils.py @@ -125,6 +125,22 @@ def open_and_check_config(configFile) -> dict: return config +def parse_json_file_with_tokens(file_path: str, tokens: dict = None) -> dict: + + with open(file_path, 'r') as file: + file_string: str = file.read() + file.seek(0) # reset the index to read again + file_json_dict = json.load(file) + # use the file string as a template and replace any tokens (marked as ${string}) with any keys from the + # file_json_dict that match the tokens + parsed_file_string: str = Template(file_string).safe_substitute(**file_json_dict) + if tokens is not None: + # reparse the string with any other tokens passed in as arguments + parsed_file_string: str = Template(parsed_file_string).safe_substitute(**tokens) + # finally, parse the tokenized string as a json dict + result = json.loads(parsed_file_string) + + return result def get_only_existing_globs(file_globs: [str], inplace = True): @@ -411,7 +427,6 @@ def get_input_data(job_path, config, component): copies input files to job dir, using file name templates defined in the config file :param job_path: :param config: - :param component: :return: """ @@ -424,8 +439,8 @@ def get_input_data(job_path, config, component): # Processing files available for clearing proc_out['clearup'] = [] - file_path = Template(config[component]['ServerPathTemplate']).substitute(**config) - file_name = Template(config[component]['InputFileTemplate']).substitute(**config) + file_path = Template(config['ServerPathTemplate']).substitute(**config) + file_name = Template(config['InputFileTemplate']).substitute(**config) #TODO: check if file exists already (may be the case for multiple configs in one) diff --git a/tests/integration/full/full_test_advisory.py b/tests/integration/full/full_test_advisory.py index 
f8f76c4d121788d98ed6635f994e734d529d47f3..915c146fcfcd956d81f46d91c11a237e72f3bc64 100644 --- a/tests/integration/full/full_test_advisory.py +++ b/tests/integration/full/full_test_advisory.py @@ -58,8 +58,8 @@ class FullTestAdvisory(BaseAdvisoryTestSuite.AdvisoryTestSuite): # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/full/full_test_deposition.py b/tests/integration/full/full_test_deposition.py index d1ad040b08a9fd05a581a2074f24ae8b6edeb322..6c3e0bdf5c2b3563d88dc166d0a969ae87708252 100644 --- a/tests/integration/full/full_test_deposition.py +++ b/tests/integration/full/full_test_deposition.py @@ -3,6 +3,7 @@ import os import sys from ews.coordinator.processor_deposition import ProcessorDeposition +from ews.coordinator.utils.processor_utils import parse_json_file_with_tokens from integration.partial.integration_test_utils import IntegrationTestUtils from integration.test_suites.depo_test_suite import BaseDepoTestSuite @@ -32,16 +33,22 @@ class FullTestDeposition(BaseDepoTestSuite.DepoTestSuite): @staticmethod def write_temp_run_config_file(): - default_config = IntegrationTestUtils.DEFAULT_CONFIG_FILE_PATH - default_config_dict: dict = IntegrationTestUtils.load_json_file(default_config) - run_dict: dict = copy.deepcopy(default_config_dict) - # BaseDepoTestSuite.DepoTestSuite.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep + sys_config = IntegrationTestUtils.DEFAULT_SYS_CONFIG_FILE_PATH + sys_config_dict: dict = parse_json_file_with_tokens(sys_config) + + os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) + IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_sys_config.json" + IntegrationTestUtils.write_json_file(sys_config_dict, IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME) + + run_config = IntegrationTestUtils.DEFAULT_DEPO_CONFIG_FILE_PATH + run_config_temp_dict: dict = parse_json_file_with_tokens(run_config) + run_dict: dict = copy.deepcopy(run_config_temp_dict) + run_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH run_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH - os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/full/full_test_env_suit.py b/tests/integration/full/full_test_env_suit.py index fc8d80682fc61e706e59eb4fda7f77cf66dd5df3..9537da4565582a23c8f0482aa49c6636bd035ab5 100644 --- a/tests/integration/full/full_test_env_suit.py +++ b/tests/integration/full/full_test_env_suit.py @@ -41,8 +41,8 @@ class FullTestEnvSuit(BaseEnvSuitTestSuite.EnvSuitTestSuite): 
run_dict['Environment']['EWS-Plotting']['Kenya']['FilterForCountry'] = "True" os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/full/full_test_epi.py b/tests/integration/full/full_test_epi.py index ca2fa3c191c54c38cb8d093f9a68ee46bc77a111..9acfd8edd25423c1ca2003294c680d680b9e11d3 100644 --- a/tests/integration/full/full_test_epi.py +++ b/tests/integration/full/full_test_epi.py @@ -64,8 +64,8 @@ class FullTestEpi(BaseEpiTestSuite.EpiTestSuite): # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/full/full_test_survey.py b/tests/integration/full/full_test_survey.py index 95f81d080a2cba454f8ff1758cea15bf0a7e318e..9cd082ef5d3f8b6be74d23f6e6e7e40a26f82e54 100644 --- a/tests/integration/full/full_test_survey.py +++ b/tests/integration/full/full_test_survey.py @@ -36,8 +36,8 @@ class FullTestSurvey(BaseSurveyTestSuite.SurveyTestSuite): run_dict['Survey']['SkipServerDownload'] = False os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod def run_survey_pipeline(): diff --git a/tests/integration/partial/integration_test_utils.py b/tests/integration/partial/integration_test_utils.py index 354e1e86f37499fefdcf805ddba250a812f774f6..e2e09899ff0ee33a3a9af3c9835cce9daafc10c3 100644 --- a/tests/integration/partial/integration_test_utils.py +++ b/tests/integration/partial/integration_test_utils.py @@ -17,9 +17,16 @@ class IntegrationTestUtils: EMAIL_CRED_PATH: str = "../../test_data/test_deployment/envs/Cred_gmail.json" LOGGING_CONFIG_PATH: str = "../../test_data/test_deployment/envs/test_log_config.json" - DEFAULT_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/config_EastAfrica_fc_live.json" + DEFAULT_SYS_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/sys_config_EastAfrica_fc_live.json" + DEFAULT_DEPO_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/depo_config_EastAfrica_fc_live.json" + DEFAULT_ENV_SUIT_CONFIG_FILE_PATH: str = 
"../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/env_suit_config_EastAfrica_fc_live.json" + DEFAULT_EPI_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/epi_config_EastAfrica_fc_live.json" + DEFAULT_SURVEY_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/survey_config_EastAfrica_fc_live.json" + DEFAULT_ADVISORY_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/advisory_config_EastAfrica_fc_live.json" + TEST_WORKSPACE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/workspace/" - TEMP_CONFIG_FILE_NAME: str = None + TEMP_SYS_CONFIG_FILE_NAME: str = None + TEMP_RUN_CONFIG_FILE_NAME: str = None TEST_ASSETS_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/assets/" EXAMPLE_SURVEY_FILE_PATH: str = TEST_ASSETS_PATH + "example_survey_run.zip" @@ -190,7 +197,8 @@ class IntegrationTestUtils: args_dict: dict = {} - config_paths = [IntegrationTestUtils.TEMP_CONFIG_FILE_NAME] + sys_config_path = IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME + config_paths = [IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME] # note, possible to override these values in the kwargs loop below args_dict['live'] = False @@ -198,6 +206,7 @@ class IntegrationTestUtils: args_dict['start_date'] = start_date args_dict['component'] = component args_dict['short_name'] = shortname + args_dict['sys_config_path'] = sys_config_path args_dict['config_paths'] = config_paths args_dict['log_level'] = 'info' args_dict['clearup'] = True @@ -231,7 +240,7 @@ class IntegrationTestUtils: args_dict['start_date'] = start_date args_dict['component'] = component args_dict['short_name'] = short_name - args_dict['config_paths'] = [IntegrationTestUtils.TEMP_CONFIG_FILE_NAME] + args_dict['config_paths'] = [IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME] args_dict['log_level'] = 'info' args_dict['clearup'] = True diff --git a/tests/integration/partial/test_advisory.py b/tests/integration/partial/test_advisory.py index d72450e0663ace7f33c026f8e024e5ab2cfa16bc..79c86e0716dbdd27ff07f9d2d916442a8a74f349 100644 --- a/tests/integration/partial/test_advisory.py +++ b/tests/integration/partial/test_advisory.py @@ -44,19 +44,24 @@ class TestAdvisory(BaseAdvisoryTestSuite.AdvisoryTestSuite): prefix: str = "temp_advisory_" + nowstring # prefix: str = "temp_advisory" - default_config = IntegrationTestUtils.DEFAULT_CONFIG_FILE_PATH - default_config_dict: dict = IntegrationTestUtils.load_json_file(default_config) - run_dict: dict = copy.deepcopy(default_config_dict) IntegrationTestUtils.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep - # TestAdvisory.TEST_OUT_PATH = run_dict['WorkspacePathout'] + prefix + os.sep - run_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['ServerName'] = '' # nothing, as local machine + os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) + + sys_config_path = IntegrationTestUtils.DEFAULT_SYS_CONFIG_FILE_PATH + sys_config_dict: dict = IntegrationTestUtils.load_json_file(sys_config_path) + sys_config_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH + sys_config_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH + IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_sys_config.json" + 
IntegrationTestUtils.write_json_file(sys_config_dict, IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME) + + run_config = IntegrationTestUtils.DEFAULT_ADVISORY_CONFIG_FILE_PATH + run_config_dict: dict = IntegrationTestUtils.load_json_file(run_config) + IntegrationTestUtils.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_config_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/partial/test_deposition.py b/tests/integration/partial/test_deposition.py index f4d9b20c70052928a6c658786ec4273b000cdfc1..ac21193d3b3230783dd391377495eb7dc32ae497 100644 --- a/tests/integration/partial/test_deposition.py +++ b/tests/integration/partial/test_deposition.py @@ -45,20 +45,25 @@ class TestDeposition(BaseDepoTestSuite.DepoTestSuite): prefix: str = "temp_depo_" + nowstring # prefix: str = "temp_depo" - default_config = IntegrationTestUtils.DEFAULT_CONFIG_FILE_PATH - default_config_dict: dict = IntegrationTestUtils.load_json_file(default_config) - run_dict: dict = copy.deepcopy(default_config_dict) + # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH IntegrationTestUtils.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep - run_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['ServerName'] = '' # nothing, as local machine + os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) + + sys_config_path = IntegrationTestUtils.DEFAULT_SYS_CONFIG_FILE_PATH + sys_config_dict: dict = IntegrationTestUtils.load_json_file(sys_config_path) + sys_config_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH + sys_config_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH + IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_sys_config.json" + IntegrationTestUtils.write_json_file(sys_config_dict, IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME) + + run_config_path = IntegrationTestUtils.DEFAULT_DEPO_CONFIG_FILE_PATH + run_config_dict: dict = IntegrationTestUtils.load_json_file(run_config_path) + # run_config_dict: dict = copy.deepcopy(run_config_dict) full_server_path = os.path.abspath(IntegrationTestUtils.TEST_ASSETS_PATH) - run_dict['Deposition']['ServerPathTemplate'] = full_server_path + run_config_dict['ServerPathTemplate'] = full_server_path - # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH - os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_config_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git 
a/tests/integration/partial/test_env_suit.py b/tests/integration/partial/test_env_suit.py index 9ade40d8496dcce1be9080ac360d3243e7913b2d..9153a62582957213100b96def13b1ec66609c94c 100644 --- a/tests/integration/partial/test_env_suit.py +++ b/tests/integration/partial/test_env_suit.py @@ -45,23 +45,28 @@ class TestEnvSuit(BaseEnvSuitTestSuite.EnvSuitTestSuite): prefix: str = "temp_env_" + nowstring # prefix: str = "temp_env" - default_config = IntegrationTestUtils.DEFAULT_CONFIG_FILE_PATH - default_config_dict: dict = IntegrationTestUtils.load_json_file(default_config) - run_dict: dict = copy.deepcopy(default_config_dict) + # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH IntegrationTestUtils.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep - run_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['Environment']['WORK_PATH'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['Environment']['INPUT_PATH'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['Environment']['OUTPUT_PATH'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['ServerName'] = '' # nothing, as local machine + os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) + + sys_config_path = IntegrationTestUtils.DEFAULT_SYS_CONFIG_FILE_PATH + sys_config_dict: dict = IntegrationTestUtils.load_json_file(sys_config_path) + sys_config_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH + sys_config_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH + IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_sys_config.json" + IntegrationTestUtils.write_json_file(sys_config_dict, IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME) + + run_config_path = IntegrationTestUtils.DEFAULT_ENV_SUIT_CONFIG_FILE_PATH + run_config_dict: dict = IntegrationTestUtils.load_json_file(run_config_path) + # run_config_dict: dict = copy.deepcopy(run_config_dict) + run_config_dict['WORK_PATH'] = IntegrationTestUtils.TEST_OUT_PATH + run_config_dict['INPUT_PATH'] = IntegrationTestUtils.TEST_OUT_PATH + run_config_dict['OUTPUT_PATH'] = IntegrationTestUtils.TEST_OUT_PATH full_server_path = os.path.abspath(IntegrationTestUtils.TEST_ASSETS_PATH) - run_dict['Environment']['ServerPathTemplate'] = full_server_path + run_config_dict['ServerPathTemplate'] = full_server_path - # may be reusing a non-timestamped output file during development, so allow extant TEST_OUT_PATH - os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - IntegrationTestUtils.write_json_file(run_dict, IntegrationTestUtils.TEMP_CONFIG_FILE_NAME) + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_config_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/partial/test_epi.py b/tests/integration/partial/test_epi.py index b9466d63956e6e5c00cadd3b5bc3e35a09fa1c39..8a61e8da178c0302b3cb3ce8b5dc1980bc968992 100644 --- a/tests/integration/partial/test_epi.py +++ b/tests/integration/partial/test_epi.py @@ -41,19 +41,23 @@ class TestEpi(BaseEpiTestSuite.EpiTestSuite): prefix: str = "temp_epi_" + nowstring # prefix: str = "temp_epi" - default_config = IntegrationTestUtils.DEFAULT_CONFIG_FILE_PATH - default_config_dict: dict = 
IntegrationTestUtils.load_json_file(default_config) - run_dict: dict = copy.deepcopy(default_config_dict) IntegrationTestUtils.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep - run_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['ServerName'] = '' # nothing, as local machine - run_dict['Epidemiology']['CalculationSpanDays'] = [0, 1] - os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) - IntegrationTestUtils.TEMP_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" - name = IntegrationTestUtils.TEMP_CONFIG_FILE_NAME - IntegrationTestUtils.write_json_file(run_dict, name) + + sys_config_path = IntegrationTestUtils.DEFAULT_SYS_CONFIG_FILE_PATH + sys_config_dict: dict = IntegrationTestUtils.load_json_file(sys_config_path) + sys_config_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH + sys_config_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH + IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_sys_config.json" + IntegrationTestUtils.write_json_file(sys_config_dict, IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME) + + run_config = IntegrationTestUtils.DEFAULT_EPI_CONFIG_FILE_PATH + run_config_dict: dict = IntegrationTestUtils.load_json_file(run_config) + run_config_dict['ServerName'] = '' # nothing, as local machine + run_config_dict['CalculationSpanDays'] = [0, 1] + + IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_config.json" + IntegrationTestUtils.write_json_file(run_config_dict, IntegrationTestUtils.TEMP_RUN_CONFIG_FILE_NAME) @staticmethod diff --git a/tests/integration/partial/test_survey.py b/tests/integration/partial/test_survey.py index 52edb0f0c663edbfe2a832b9bc23a295185b681a..59edcf4e4812ee6c64e470dc17440565e7fbda9e 100644 --- a/tests/integration/partial/test_survey.py +++ b/tests/integration/partial/test_survey.py @@ -42,21 +42,29 @@ class TestSurvey(BaseSurveyTestSuite.SurveyTestSuite): prefix: str = "temp_survey_" + nowstring # prefix: str = "temp_survey" - default_config = IntegrationTestUtils.DEFAULT_CONFIG_FILE_PATH - default_config_dict: dict = IntegrationTestUtils.load_json_file(default_config) - run_dict: dict = copy.deepcopy(default_config_dict) IntegrationTestUtils.TEST_OUT_PATH = IntegrationTestUtils.TEST_WORKSPACE_PATH + prefix + os.sep - run_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['Environment']['WORK_PATH'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['Environment']['INPUT_PATH'] = IntegrationTestUtils.TEST_OUT_PATH - run_dict['Environment']['OUTPUT_PATH'] = IntegrationTestUtils.TEST_OUT_PATH + os.makedirs(IntegrationTestUtils.TEST_OUT_PATH, exist_ok = True) + + sys_config_path = IntegrationTestUtils.DEFAULT_SYS_CONFIG_FILE_PATH + sys_config_dict: dict = IntegrationTestUtils.load_json_file(sys_config_path) + sys_config_dict['WorkspacePathout'] = IntegrationTestUtils.TEST_OUT_PATH + sys_config_dict['WorkspacePath'] = IntegrationTestUtils.TEST_OUT_PATH + IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME = IntegrationTestUtils.TEST_OUT_PATH + "temp_sys_config.json" + IntegrationTestUtils.write_json_file(sys_config_dict, IntegrationTestUtils.TEMP_SYS_CONFIG_FILE_NAME) + + run_config = IntegrationTestUtils.DEFAULT_SURVEY_CONFIG_FILE_PATH + run_config_dict: dict = IntegrationTestUtils.load_json_file(run_config) + run_dict: 
diff --git a/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/depo_config_EastAfrica_fc_live.json b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/depo_config_EastAfrica_fc_live.json
new file mode 100644
index 0000000000000000000000000000000000000000..2fe47aa770638840c76c13e3355ac41cbb0645fe
--- /dev/null
+++ b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/depo_config_EastAfrica_fc_live.json
@@ -0,0 +1,23 @@
+{
+    "ServerPathTemplate": "OVERRIDDEN",
+    "InputFileTemplate": "WR_NAME_Ethiopia_${StartString}_fc",
+    "DataFileTemplate": "deposition_srcs_allregions_${StartString}.nc",
+    "TimeExpectedAvailable": "0800",
+    "ProcessPreJob": "process_pre_job_server_download",
+    "ProcessInJob": "process_in_job_dep",
+    "ProcessEWSPlotting": "process_EWS_plotting_dep",
+    "EWS-Plotting": {
+        "SysConfig": "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/sys/SYS_CONFIG_PINE.json",
+        "NameExtractionConfig": "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/name_extraction/NAME_EXTRACTION_CONFIG.json",
+        "RunConfig": "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/deposition/RUN_CONFIG_DEPO.json",
+        "RunConfigNorm": "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/deposition/RUN_CONFIG_DEPO_NORMALIZED.json",
+        "EastAfrica": {
+            "ChartConfig": "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/chart/CHART_CONFIG_EAST_AFRICA_PINE.json",
+            "Normalize": "False"
+        },
+        "Ethiopia": {
+            "ChartConfig": "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/chart/CHART_CONFIG_ETHIOPIA_PINE.json",
+            "Normalize": "True"
+        }
+    }
+}
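The `${StartString}` placeholders in the templates above follow Python's `string.Template` syntax, so a substitution like the sketch below is presumably how the coordinator expands them; the coordinator's actual substitution helper is not shown in this diff, and the run date is made up.

```python
from string import Template

# template strings copied from depo_config_EastAfrica_fc_live.json
input_file_template = Template("WR_NAME_Ethiopia_${StartString}_fc")
data_file_template = Template("deposition_srcs_allregions_${StartString}.nc")

tokens = {"StartString": "20240606"}  # hypothetical run date

print(input_file_template.substitute(tokens))  # WR_NAME_Ethiopia_20240606_fc
print(data_file_template.substitute(tokens))   # deposition_srcs_allregions_20240606.nc
```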
"process_pre_job_server_download", + "ProcessInJob" : "process_in_job_env2_0", + "ProcessEWSPlotting" : "process_EWS_plotting_env2_0", + "RESOURCES_PATH": "../../test_data/test_deployment/regions/EastAfrica/resources/met_extractor/", + "WORK_PATH" : "../../test_data/test_deployment/regions/EastAfrica/workspace/", + "INPUT_PATH" : "../../test_data/test_deployment/regions/EastAfrica/workspace/", + "OUTPUT_PATH" : "../../test_data/test_deployment/regions/EastAfrica/workspace/", + "SYS_CONFIG" : "../../test_data/test_deployment/regions/EastAfrica/resources/met_extractor/configs/SYS_CONFIG_PINE.json", + "FIELD_NAME_CONSTANTS" : "../../test_data/test_deployment/regions/EastAfrica/resources/met_extractor/configs/FIELD_NAME_CONSTANTS.csv", + "RUN_TYPE" : "operational", + "EXTRACTION_DAYS": 7, + "FORECAST_DAYS": 6, + "STRAINS": ["LeafRust","StemRust","StripeRust","LeafRust_TempOnly","StemRust_TempOnly","StripeRust_TempOnly"], + "PARAMS": { + "LeafRust": { + "suitability_modules": ["semibool_dewperiod"], + "past_steps": 0, + "future_steps": 2, + "thresholds": { + "temperature": [2,15,20,30], + "precipitation": 0, + "relative_humidity": 90 + } + }, + "StemRust": { + "suitability_modules": ["semibool_dewperiod"], + "past_steps": 0, + "future_steps": 2, + "thresholds": { + "temperature": [2,15,24,30], + "precipitation": 0, + "relative_humidity": 90 + } + }, + "StripeRust": { + "suitability_modules": ["v1_dewperiod"], + "past_steps": 0, + "future_steps": 7, + "thresholds": { + "temperature": [2.37,19.8], + "precipitation": 0, + "relative_humidity": 90 + } + }, + "LeafRust_TempOnly": { + "suitability_modules": ["semibool_dewperiod"], + "past_steps": 0, + "future_steps": 2, + "thresholds": { + "temperature": [2,15,20,30], + "precipitation": -1, + "relative_humidity": 90 + } + }, + "StemRust_TempOnly": { + "suitability_modules": ["semibool_dewperiod"], + "past_steps": 0, + "future_steps": 2, + "thresholds": { + "temperature": [2,15,24,30], + "precipitation": -1, + "relative_humidity": 90 + } + }, + "StripeRust_TempOnly": { + "suitability_modules": ["v1_dewperiod"], + "past_steps": 0, + "future_steps": 7, + "thresholds": { + "temperature": [2.37,19.8], + "precipitation": -1, + "relative_humidity": 90 + } + } + }, + "EWS-Plotting" : { + "SysConfig" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/sys/SYS_CONFIG_PINE.json", + "RunConfig" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/env_suit/RUN_CONFIG_ENV.json", + "EastAfrica" : { + "ChartConfig" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/chart/CHART_CONFIG_EAST_AFRICA_PINE.json", + "FilterForCountry" : "False" + }, + "Ethiopia" : { + "ChartConfig" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/chart/CHART_CONFIG_ETHIOPIA_PINE.json", + "FilterForCountry" : "True" + } + } +} diff --git a/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/epi_config_EastAfrica_fc_live.json b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/epi_config_EastAfrica_fc_live.json new file mode 100644 index 0000000000000000000000000000000000000000..c7c4a789424964b5ab086f444d12455c2343ecea --- /dev/null +++ b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/epi_config_EastAfrica_fc_live.json @@ -0,0 +1,90 @@ +{ + "DiseaseNames" : ["StemRust"], + "CalculationSpanDays" : [0,1], + "TimeStep_hours": "3", + "ProcessPreJob" : "process_pre_job_epi", + 
"ProcessInJob" : "process_in_job_epi", + "ProcessEWSPlotting" : "process_EWS_plotting_epi", + "Host" : { + "MaxFieldsPerCell" : "1", + "TargetRaster" : "../../test_data/test_deployment/regions/EastAfrica/resources/epimodel/assets/wheat_area_frac_MapSPAM2010_EastAfrica_clipped.tif", + "HostRasters" : { + "201001010000" : "../../test_data/test_deployment/regions/EastAfrica/resources/epimodel/assets/wheat_area_frac_MapSPAM2010_EastAfrica_clipped.tif" + } + }, + "Deposition" : { + "VariableNames" : ["P_GRAMINIS_DEPOSITION"], + "PathTemplate" : "${WorkspacePath}DEPOSITION_${DateString}/WR_NAME_Ethiopia_${DateString}_fc/", + "SuccessFileTemplate" : "${WorkspacePath}DEPOSITION_${StartString}/STATUS_SUCCESS", + "FileListerFunction" : "list_onefile_operational", + "FileNameTemplate" : "deposition_srcs_allregions_${DateString}.nc", + "FileNamePrepared" : "?" + }, + "Environment" : { + "PathTemplate" : "${WorkspacePath}ENVIRONMENT_2.0_${DateString}/processed/${RegionName}/${DiseaseName}/", + "SuccessFileTemplate" : "${WorkspacePath}ENVIRONMENT_2.0_${StartString}/STATUS_SUCCESS", + "FileListerFunction" : "list_onefile_operational", + "FileNameTemplate" : "RIE_value.nc", + "FileNamePrepared" : "?" + }, + "Env" : { + "SuccessFileTemplate" : "${WorkspacePath}EPI_${YesterdayString}/STATUS_SUCCESS", + "PathTemplate" : "${WorkspacePath}EPI_${YesterdayString}/${RegionName}/${DiseaseName}/", + "FileNameTemplate" : "infections_config_EastAfrica_fc_live_Epidemiology_*_env_progression.csv", + "FileListerFunction" : "list_onefile_historical", + "FileLoaderFunction" : "load_and_restructure_epi_file", + "FileNamePrepared" : "?" + }, + "ps" : { + "SuccessFileTemplate" : "${WorkspacePath}EPI_${YesterdayString}/STATUS_SUCCESS", + "PathTemplate" : "${WorkspacePath}EPI_${YesterdayString}/${RegionName}/${DiseaseName}/", + "FileNameTemplate" : "infections_config_EastAfrica_fc_live_Epidemiology_*_psbeta0.004gamma0.00025alpha1.0_progression.csv", + "FileListerFunction" : "list_onefile_historical", + "FileLoaderFunction" : "load_and_restructure_epi_file", + "FileNamePrepared" : "?" + }, + "Epi" : [ + { + "model" : "Env", + "modelArguments" : {}, + "infectionRasterFileName" : "?", + "description": "env. 
suitability", + "rescale_output_by_host_raster": false, + "analysis" : { + "vmin" : 0.0e+0, + "vmax" : 1.5e+1, + "subplot_position" : [0,0], + "cmapString" : "CMRmap_r", + "bounds" : [32.5,48.0,3.3,15.0], + "UTMprojection" : 37 + } + },{ + "model" : "ps", + "modelArguments": { + "beta": 4.0e-3, + "gamma": 2.5e-4, + "alpha": 1.0, + "infection": "previous" + }, + "infectionRasterFileName" : "?", + "description": "SE[beta*log10(alpha*D)+gamma*P]", + "rescale_output_by_host_raster": true, + "analysis" : { + "vmin" : 0.0e+0, + "vmax" : 1, + "subplot_position" : [0,1], + "cmapString" : "CMRmap_r", + "bounds": [32.5,48.0,3.3,15.0], + "UTMprojection" : 37 + } + } + ], + "EWS-Plotting" : { + "SysConfig" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/sys/SYS_CONFIG_PINE.json", + "RunConfig_seasonsofar" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/epi/RUN_CONFIG_EPI.json", + "RunConfig_seasonplusforecast" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/epi/RUN_CONFIG_EPI.json", + "ChartConfig" : "../../test_data/test_deployment/regions/EastAfrica/resources/plotting/configs/chart/CHART_CONFIG_ETHIOPIA_PINE.json", + "PlottingRegionName" : "Ethiopia", + "EpiCase" : "ps" + } +} diff --git a/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/survey_config_EastAfrica_fc_live.json b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/survey_config_EastAfrica_fc_live.json new file mode 100644 index 0000000000000000000000000000000000000000..b134251000381e1be0c6348d194748ff9be029f5 --- /dev/null +++ b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/survey_config_EastAfrica_fc_live.json @@ -0,0 +1,40 @@ +{ + "ProcessPreJob" : "process_pre_job_survey", + "ProcessInJob" : "process_in_job_survey", + "ProcessEWSPlotting": "process_EWS_plotting_survey", + "AcceptableDowntimeDays": 70, + "SeasonStartString" : "20220930", + "SkipServerDownload" : true, + "ServerCredentialsFile" : "../../test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/Cred-ODK-EIAR.json", + "ServerPathExtra" : "OVERRIDDEN", + "FormEdits" : { + "wheat_rust_survey_1_0" : { + "add" : { + "Origin" : "ODK-server", + "PublishedLevel" : "Raw" + + } + }, + "akpyJHvYxkLKPkxFJnPyTW" : { + "add" : { + "Origin" : "kobo-server", + "PublishedLevel" : "Raw" + }, + "filter_by_list": { + "surveyor_infromation-country" : ["Kenya", "Ethiopia"] + } + }, + "WRSIS" : {} + }, + "Groups" : { + "PROD" : { + "Origin" : ["ODK-server", "kobo-server"] + } + }, + "GroupBy" : ["Origin"], + "GroupsToIgnore" : ["ODK-server", "kobo-server", "newODK", "newODK2", "CSV-CAM"], + "SurveyFormat" : "ODK", + "SurveyorNameCol" : "surveyor_infromation-surveyor_name", + "SourcesRegionName" : "EastAfrica", + "SourcesConfigDefault" : "../../test_data/test_deployment/regions/EastAfrica/resources/source_gen/configs/config_EastAfrica_mapspam2017.json" +} diff --git a/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/sys_config_EastAfrica_fc_live.json b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/sys_config_EastAfrica_fc_live.json new file mode 100644 index 0000000000000000000000000000000000000000..a47eb7a22c6ceea1a0dc75b714501bf2db47a00e --- /dev/null +++ b/tests/test_data/test_deployment/regions/EastAfrica/resources/coordinator/configs/sys_config_EastAfrica_fc_live.json @@ -0,0 +1,9 @@ +{ + "RegionName" : 
"EastAfrica", + "SubRegionNames" : ["EastAfrica","Ethiopia"], + "StartTime" : "?", + "StartString" : "?", + "WorkspacePathout" : "set_in_the_code", + "WorkspacePath" : "set_in_the_code", + "ResourcesPath" : "../../test_data/test_deployment/regions/EastAfrica/resources/" +}