import argparse
import datetime
import glob
import json
import os

from typing import List, Type
from unittest import TestSuite, TestLoader, TestCase, TestResult
from zipfile import ZipFile

from HTMLTestRunner import HTMLTestRunner

from ews.coordinator.processor_base import ProcessorBase
from ews.coordinator.utils import processor_utils


class IntegrationTestUtils:

    EMAIL_CRED_PATH: str = "../../test_data/test_deployment/envs/Cred_gmail.json"
    LOGGING_CONFIG_PATH: str = "../../test_data/test_deployment/envs/test_log_config.json"
    DEFAULT_SYS_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/configs/coordinator/sys_config_EastAfrica_fc_live.json"
    DEFAULT_DEPO_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/configs/coordinator/depo_config_EastAfrica_fc_live.json"
    DEFAULT_ENV_SUIT_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/configs/coordinator/env_suit_config_EastAfrica_fc_live.json"
    DEFAULT_EPI_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/configs/coordinator/epi_config_EastAfrica_fc_live.json"
    DEFAULT_SURVEY_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/configs/coordinator/survey_config_EastAfrica_fc_live.json"
    DEFAULT_ADVISORY_CONFIG_FILE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/configs/coordinator/advisory_config_EastAfrica_fc_live.json"

    TEST_WORKSPACE_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/workspace/"

    """
    The RUN_SYS_CONFIG_FILE_PATH and RUN_CONFIG_FILE_PATH variables store the file paths of the json configs that are
    written to the job dir for a specific test run. They are used by both the partial and the full integration tests.
    """
    RUN_SYS_CONFIG_FILE_PATH: str = None
    RUN_CONFIG_FILE_PATH: str = None

    TEST_ASSETS_PATH: str = "../../test_data/test_deployment/regions/EastAfrica/resources/assets/coordinator/"
    EXAMPLE_SURVEY_FILE_PATH: str = TEST_ASSETS_PATH + "example_survey_run.zip"
    EXAMPLE_DEPO_FILE_PATH: str = TEST_ASSETS_PATH + "example_depo_run.zip"
    EXAMPLE_ENV_SUIT_FILE_PATH: str = TEST_ASSETS_PATH + "example_env_suit_run.zip"

    '''
    The TEST_OUT_PATH root dir is set by the partial integration tests in the 'write_temp_run_config_files' method
    e.g.:
    TestAdvisory.write_temp_run_config_files
    
    For full integration tests, the out dir is passed in as a command line arg and is set in:
    IntegrationTestUtils.run_full_integration_test_pipeline
    '''
    TEST_OUT_PATH: str = None

    '''
    The TEST_START_DATE and TEST_JOB_DIR are set by the partial integration tests in the 'set_expected_values' method
    e.g.:
    TestAdvisory.set_expected_values

    For full integration tests, the date and out dir are passed in as command line args and are set in:
    IntegrationTestUtils.run_full_integration_test_pipeline
    '''
    TEST_START_DATE: str = None
    TEST_JOB_DIR: str = None

    @staticmethod
    def check_resources_exist():
        """
        Does a single check for the email cred file; raises an error suggesting that the user may not have set the
        working directory correctly (the tests use paths relative to the test directory).
        """
        emailcred_exists = os.path.exists(IntegrationTestUtils.EMAIL_CRED_PATH)
        if not emailcred_exists:
            raise FileNotFoundError(f"email cred file {IntegrationTestUtils.EMAIL_CRED_PATH} not found - have you set "
                                    f"the working directory to the same directory as the tests you are running?"
                                    f" (tests use a path relative to the test directory)")

    @staticmethod
    def build_arg_parser() -> argparse.ArgumentParser:
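        """
        Builds the command line argument parser used by the full integration tests: --sys_config, --config, --outdir
        and --email_cred are required, while --test_report_dir, --custom_run_date and --custom_dir_prefix are
        optional; any remaining positional arguments are collected into 'unittest_args'.
        """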
        parser = argparse.ArgumentParser()
        parser.add_argument('--sys_config', required = True)
        parser.add_argument('--config', required = True)
        parser.add_argument('--outdir', required = True)
        parser.add_argument('--email_cred', required = True)
        parser.add_argument('--test_report_dir', required = False)
        # parser.add_argument('--run_date_type', required = False)
        parser.add_argument('--custom_run_date', required = False)
        parser.add_argument('--custom_dir_prefix', required = False, help = "prefix for the test dir, a sensible value will be set as default")
        parser.add_argument('unittest_args', nargs='*')
        return parser

    @staticmethod
    def generate_output_path(filename_prefix: str) -> str:
        # if an environment variable "TEST_OUTDIR" is set, use that as the output path
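        # note: the value is concatenated as-is, so TEST_OUTDIR is expected to end with a path separator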
        if os.getenv("TEST_OUTDIR"):
            result = os.getenv("TEST_OUTDIR") + filename_prefix + "/"
        else:
            result = IntegrationTestUtils.TEST_WORKSPACE_PATH + filename_prefix + "/"

        return result

    @staticmethod
    def run_full_integration_test_pipeline(test_case: Type[TestCase],
                                           test_prefix: str,
                                           processor_dir: str) -> bool:

        """
        Runs the full integration tests on the production server GitlabRunner. The full integration tests are run on
        a GitlabRunner that runs on the production server, and mounts the local file system to use the production
        configuration files.

        :param test_case:
        :param test_prefix:
        :param processor_dir:
        :return:
        """

        _parser = IntegrationTestUtils.build_arg_parser()

        _args = _parser.parse_args()
        _sys_config_file: str = _args.sys_config
        _run_config_file: str = _args.config
        _outdir: str = _args.outdir
        _email_cred_path: str = _args.email_cred
        _test_report_dir: str = _args.test_report_dir
        # _run_date_type: str = _args.run_date_type
        _custom_run_date: str = _args.custom_run_date
        _custom_dir_prefix: str = _args.custom_dir_prefix

        """
        The paths to the production run json configs are stored here. They are then read into a dict, the output
        paths are overridden to point to the test dir, and the result is re-written as the json config in the test
        dir. When the test-specific files are written, the paths are overridden to point to the test dir files.
        """
        IntegrationTestUtils.RUN_SYS_CONFIG_FILE_PATH = _sys_config_file
        IntegrationTestUtils.RUN_CONFIG_FILE_PATH = _run_config_file

        if _custom_dir_prefix is None:
            nowstring: str = IntegrationTestUtils.get_now_string()
            prefix: str = f"temp_{test_prefix}_" + nowstring
            # prefix: str = f"temp_{test_prefix}"
        else:
            prefix = _custom_dir_prefix

        IntegrationTestUtils.TEST_OUT_PATH = _outdir + prefix + os.sep
        IntegrationTestUtils.EMAIL_CRED_PATH = _email_cred_path

        IntegrationTestUtils.TEST_START_DATE = _custom_run_date

        IntegrationTestUtils.TEST_JOB_DIR = os.path.join(IntegrationTestUtils.TEST_OUT_PATH,
                                                         f"{processor_dir}_" +
                                                         IntegrationTestUtils.TEST_START_DATE)

        tests: TestSuite = TestLoader().loadTestsFromTestCase(test_case)

        if _test_report_dir is None:
            _test_report_dir = IntegrationTestUtils.TEST_OUT_PATH

        """
        The HTMLTestRunner will create the directory if it does not exist. We put the test report into
        _test_report_dir, which defaults to TEST_OUT_PATH (the top-level test directory for this run), not into
        TEST_JOB_DIR (the dir for the pipeline being tested). The TEST_JOB_DIR must not exist before the test is
        instantiated, to ensure the initial pipeline run takes place; directing the test report into TEST_JOB_DIR
        would create the parent dir and stop the pipeline from running.
        """
        runner = HTMLTestRunner(output = _test_report_dir, log = True, report_name = f"{test_prefix}_results")
        result: TestResult = runner.run(tests)
        return result.wasSuccessful()


    @staticmethod
    def load_json_file(file: str) -> dict:
        with open(file) as config_file:
            config: dict = json.load(config_file)
            return config


    @staticmethod
    def write_json_file(values: dict, file: str):
        with open(file, 'w') as json_file:
            json.dump(values, json_file, indent = 4)


    @staticmethod
    def get_now_string() -> str:
        nowstring: str = datetime.datetime.today().strftime('%Y-%m-%d_%H%M%S')
        return nowstring


    @staticmethod
    def unpack_zip(zip_to_unpack: str, out_file: str):
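        """
        Extracts all members of the zip_to_unpack archive into the out_file directory.
        """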
        with ZipFile(zip_to_unpack) as zf:  # open the zip file
            for target_file in zf.namelist():  # iterate over every member of the archive
                zf.extract(target_file, out_file)  # extract each member into the out_file directory

    @staticmethod
    def count_files_in_wildcard(wildcard: str) -> int:
        results = glob.glob(wildcard)
        return len(results)


    @staticmethod
    def check_file_not_empty(file_path: str) -> bool:
        return os.stat(file_path).st_size != 0

    @staticmethod
    def check_file_exists(file_path: str) -> bool:
        return os.path.isfile(file_path)

    @staticmethod
    def check_wildcard_exists_and_not_empty(wildcard: str) -> bool:

        """
        Requires at least one file matching the wildcard to exist, and every matched file to be non-empty.
        """
        result = False
        files: List[str] = glob.glob(wildcard)
        for file in files:
            result = IntegrationTestUtils.check_file_not_empty(file)
            if result is False:
                break
        return result

    @staticmethod
    def check_file_exists_and_not_empty(file_path: str) -> bool:
        # check existence first so that os.stat is not called on a missing file
        file_exists = IntegrationTestUtils.check_file_exists(file_path)
        return file_exists and IntegrationTestUtils.check_file_not_empty(file_path)

    @staticmethod
    def run_partial_integration_test_pipeline(component: str,
                                              shortname: str,
                                              start_date: str,
                                              processor: ProcessorBase,
                                              **kwargs):

        """
        Runs the "run_Process" function in Processor.py with the given arguments for the partial integration tests.
        The full integration pipeline is run in the "run_full_integration_test_pipeline" function.

        :param processor:
        :param component:
        :param shortname:
        :param start_date:
        :param kwargs:
        :return:
        """

        args_dict: dict = {}

        sys_config_path = IntegrationTestUtils.RUN_SYS_CONFIG_FILE_PATH
        config_path = IntegrationTestUtils.RUN_CONFIG_FILE_PATH

        # note, possible to override these values in the kwargs loop below
        args_dict['live'] = False
        args_dict['noupload'] = True
        args_dict['start_date'] = start_date
        args_dict['component'] = component
        args_dict['short_name'] = shortname
        args_dict['log_level'] = 'info'
        args_dict['clearup'] = True

        for key, value in kwargs.items():
            args_dict[key] = value

        #  need EMAIL_CRED in the environment before we run a Processor
        os.environ["EMAIL_CRED"] = IntegrationTestUtils.EMAIL_CRED_PATH

        try:
            config: dict = processor.build_config(args_dict, config_path, sys_config_path)
            job_path: str = processor.generate_job_directory_path(config)
            processor.prepare_job_directory(job_path)
            processor_utils.setup_logging(job_path, config)  # logging set up after the job dir is prepared
            processor.run_process(config)
        except SystemExit as e:
            print(f"SystemExit: {e}")
            # we will eventually want to throw these to the calling class to be dealt with
            pass

    @staticmethod
    def run_external_pipeline(component: str,
                              short_name: str,
                              start_date: str,
                              processor: ProcessorBase,
                              **additional_args):
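        """
        Runs the run_process method of the given ProcessorBase with the standard test arguments, mirroring
        run_partial_integration_test_pipeline; any SystemExit raised by the run is swallowed so that the calling test
        can carry out its own checks.
        """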

        args_dict: dict = {}

        config_path: str = IntegrationTestUtils.RUN_CONFIG_FILE_PATH
        sys_config_path: str = IntegrationTestUtils.RUN_SYS_CONFIG_FILE_PATH

        # note, possible to override these values in the additional_args loop below
        args_dict['live'] = False
        args_dict['noupload'] = True
        args_dict['start_date'] = start_date
        args_dict['component'] = component
        args_dict['short_name'] = short_name
        args_dict['log_level'] = 'info'
        args_dict['clearup'] = True

        for key, value in additional_args.items():
            args_dict[key] = value

        #  need EMAIL_CRED in the environment before we run a Processor
        os.environ["EMAIL_CRED"] = IntegrationTestUtils.EMAIL_CRED_PATH

        try:
            config: dict = processor.build_config(args_dict, config_path, sys_config_path)
            job_path: str = processor.generate_job_directory_path(config)
            processor.prepare_job_directory(job_path)
            processor_utils.setup_logging(job_path, config)  # logging set up after the job dir is prepared
            processor.run_process(config)
        except SystemExit:
            # swallow the SystemExit here - any failure is picked up by the checks in the calling test
            pass


    @staticmethod
    def get_day_before_as_string(input_date_string: str) -> str:
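        """
        Returns the day before the given date string, in the same YYYYMMDD format (e.g. "20240101" -> "20231231").
        """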
        date_format = "%Y%m%d"
        input_date: datetime.datetime = datetime.datetime.strptime(input_date_string, date_format)
        yesterday_date = input_date - datetime.timedelta(days = 1)
        yesterday_string = yesterday_date.strftime(date_format)
        return yesterday_string

    @staticmethod
    def count_tokens_in_file(file_path: str, token_to_find: str) -> int:
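        """
        Counts the number of lines in file_path that contain token_to_find; a line with multiple occurrences is
        counted once.
        """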
        token_count = 0
        with open(file_path, 'r') as log_file:
            lines = log_file.readlines()
            for line in lines:
                if token_to_find in line:
                    token_count += 1

        return token_count