From 8bece38704fedca14f4b5d74e557a7fcf8d7150b Mon Sep 17 00:00:00 2001
From: Jake Smith <jws52@cam.ac.uk>
Date: Mon, 4 Jan 2021 16:53:44 +0000
Subject: [PATCH] Updated ODK Briefcase to v1.18.0. Note that this affects
 the format of the exported CSV file.

---
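For reference, a minimal standalone sketch of the updated Briefcase pull call
is included below. It uses only the jar path, CLI flags and credential keys
that appear in the diff; the logger setup, the placeholder credential values
and the output path are illustrative assumptions, not the exact code in
process_in_job_survey.

    import logging
    import subprocess

    logger = logging.getLogger(__name__)

    # Values mirroring the diff; cred would normally come from the job config.
    ODK_jar = '/storage/app/EWS/General/EWS-Coordinator/ODK-Briefcase-v1.18.0.jar'
    cred = {'form_id': 'FORM_ID', 'server': 'https://example.org',
            'user': 'USER', 'pass': 'PASS'}           # placeholder credentials
    ODK_output_path = '/tmp/ExportRawDB'              # assumed path for this sketch

    ODK_download = ['java', '-jar', ODK_jar,
                    '--pull_aggregate',
                    '--form_id', cred['form_id'],
                    '--storage_directory', ODK_output_path,
                    '--odk_url', cred['server'],      # renamed from --aggregate_url
                    '--odk_username', cred['user'],
                    '--odk_password', cred['pass']]

    try:
        pull = subprocess.run(ODK_download, check=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
        # Pass the Briefcase output to the logger, line by line.
        for line in pull.stdout.decode('utf-8').split('\n'):
            logger.info('ODK pull: ' + line)
    except subprocess.CalledProcessError as e:
        for line in e.stdout.decode('utf-8').split('\n'):
            logger.info('ODK pull: ' + line)
        logger.exception('Failed to pull submissions from the ODK server')
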
 ProcessorComponents.py | 46 ++----------------------------------------
 1 file changed, 2 insertions(+), 44 deletions(-)

diff --git a/ProcessorComponents.py b/ProcessorComponents.py
index a2135e6..e5e6064 100644
--- a/ProcessorComponents.py
+++ b/ProcessorComponents.py
@@ -162,13 +162,13 @@ def process_in_job_survey(jobPath,status,config,component):
     ODK_output_path = f"{jobPath}/ExportRawDB"
 
     # get data from ODK server
-    ODK_jar = '/storage/app/EWS/General/EWS-Coordinator/ODK-Briefcase-v1.11.2.jar'
+    ODK_jar = '/storage/app/EWS/General/EWS-Coordinator/ODK-Briefcase-v1.18.0.jar'
     ODK_download = ['java',
             '-jar', ODK_jar,
             '--pull_aggregate',
             '--form_id', cred['form_id'],
             '--storage_directory', ODK_output_path,
-            '--aggregate_url', cred['server'],
+            '--odk_url', cred['server'],
             '--odk_username',cred['user'],
             '--odk_password',cred['pass']]
     ODK_download_success = True
@@ -280,26 +280,6 @@ def process_in_job_survey(jobPath,status,config,component):
     except:
         status.reset('ERROR')
         endJob(status,premature=True)
-
-    #pre_process = subprocess.run(R_process_surveys, 
-    #            check=True,
-    #            stdout = subprocess.PIPE,
-    #            stderr = subprocess.STDOUT)
-
-    #    # pass the stdout to logger
-    #    for line in pre_process.stdout.decode('utf-8').split(r'\n'):
-    #        logger.info('survey preprocessor: ' + line)
-
-    #except subprocess.CalledProcessError as e:
-    #    
-    #    for line in e.stdout.decode('utf-8').split(r'\n'):
-    #        logger.info('survey preprocessor: ' + line)
-
-    #    # TODO: try to use yesterday's Processed_SurveyData.csv
-
-    #    logger.exception('Failed to process surveys with removals and additions')
-    #    status.reset('ERROR')
-    #    endJob(status,premature=True)
     
     logger.debug('Preparing clustering calculation')
 
@@ -344,28 +324,6 @@ def process_in_job_survey(jobPath,status,config,component):
         status.reset('ERROR')
         endJob(status,premature=True)
 
-    #try:
-
-    #    clustering_process = subprocess.run(clustering_calc, 
-    #            env=clustering_env, 
-    #            check=True,
-    #            stdout = subprocess.PIPE,
-    #            stderr = subprocess.STDOUT)
-
-    #    # pass the stdout to logger
-    #    for line in clustering_process.stdout.decode('utf-8').split(r'\n'):
-    #        logger.info('wheat-source-generation: ' + line)
-
-    #except subprocess.CalledProcessError as e:
-    #    
-    #    for line in e.stdout.decode('utf-8').split(r'\n'):
-    #        logger.info('wheat-source-generation: ' + line)
-
-    #    logger.exception('Failed to perform source calculation on processed surveys')
-
-    #    status.reset('ERROR')
-    #    endJob(status,premature=True)
-    
     logger.debug('Checking output of clustering calculation')
 
     clustering_output_path_glob = f"{cluster_calc_path}/output/sources_{date.strftime('%Y-%m-%d')}_*.csv"
-- 
GitLab