import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor, as_completed
from apiClient import APIClient
import logging
import threading
from lxml import etree
from testrail import *
import time
from parseResult import *
logging.basicConfig(level = logging.INFO, format = 
                    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
from datetime import datetime
from collections import Counter
import sys

################################################################################
## This file used for import robot execution result to test rail
################################################################################
# --- TestRail project configuration ------------------------------------------
G_PROJECT_ID = 21         # TANGO
G_INCLUDE_ALL = False
G_ASSIGN_TO_ID = 144     # kenqi
G_INT_MAX_RESULT_COUNT=6  ##set max result count, when result count is more than this number, delete test run and re-create
PAGE_SIZE=250
G_SUITE_ID=61559
# SECURITY NOTE(review): API credentials are hard-coded in source control.
# Move them to environment variables or a secrets store and rotate this key.
client = APIClient('https://dbschenker.testrail.io/')
client.user = 'ken.qi@dbschenker.com'
client.password = '92godpvJbegbbFVnVO8C-PGtP75T/wyWJ9qkRVPPy'
# Upper bounds for the offset-paged case fetches (how far pagination will scan).
# NOTE: the "NUMEBR" misspelling is part of the runtime key names -- renaming
# a key requires updating every reader of this dict.
MAX_NUMBERS={
    'MAX_CASE_NUMBER':10000,
    'MAX_DAILY_AUTOMATION_CASE_NUMEBR':5000,
    'MAX_WEEKEND_AUTOMATION_CASE_NUMEBR':3000,
    'MAX_HOTFIX_AUTOMATION_CASE_NUMEBR':3000
}

# Values of the TestRail 'custom_robot' case field (automation status).
ROBOT_STATUS={
        "00 - Planned":1,
        "10 - Changed":2,
        "20 - Automated":3,
        "30 - Impossible":4,
        "40 - Not Needed":5,
        "99 - Decommissioned":6
}

# Fixed TestRail milestone ids used for the recurring INT runs.
MILESTONE_IDS={
    'Daily':3199,
    'Weekend':3200
}

# TestRail suite ids per module.
SUITE_IDS={
    "EQP_SUITE" : 31732,         #EQP
    "GRIT_SUITE" : 31733         #GRIT
}

# Canonical test-run display names per module / cadence / environment.
RUN_NAMES={
    'EQP_Daily':"INT EQP Daily Run",
    'EQP_Weekend':"INT EQP Weekend Run",
    'GRIT_Daily':"INT GRIT Daily Run",
    'GRIT_Weekend':"INT GRIT Weekend Run",
    'EQP_Shakedown':"FAT EQP Automation",
    'GRIT_Shakedown':"FAT GRIT Automation",
    'FAT_EQP_HOTFIX': "EQP FAT Automation Test",
    'FAT_GRIT_HOTFIX': "GRIT FAT Automation Test",
    'INT_EQP_HOTFIX': "EQP INT Automation Test",
    'INT_GRIT_HOTFIX': "GRIT INT Automation Test"
}

class TestRail(object):
    def get_case_by_id(self,case_id):
        if case_id.startswith("C"):
            case_id=case_id[1:]
        try:
            case = client.send_get(f"get_case/{case_id}")
            return case
        except Exception as e:
            print(f"Error fetching case details: {e}")
            return None


    def get_case_id_by_name(self,case_name):
        try:
            case = client.send_get(f"get_case/{G_PROJECT_ID}&title={case_name}")
            case_id = case["id"]
            return case_id
        except Exception as e:
            print(f"Error fetching case details: {e}")
            return None

    def get_suite_id_by_project(self):
        try:
            suites = client.send_get(f"get_suites/{G_PROJECT_ID}")
            print(suites)
            return suites
        except Exception as e:
            print(f"Error fetching case details: {e}")
            return None

    def get_test_by_id(self,id):
        try:
            case = client.send_get(f"get_case_fields")
            print(case)
            return case
        except Exception as e:
            print(f"Error fetching case details: {e}")
            return None
    
    def get_all_tests_by_runid(self,run_id,tests_count):
        limit = 250
        total_tests = []
        offsets = []
        # First, determine the total number of cases
        initial_cases = TestRail().get_tests(run_id, 0, limit)
        total_tests = initial_cases
        if len(initial_cases) < limit:
            return total_tests

        # Calculate the offsets based on the total number of cases
        # If the initial fetch was complete, assume more pages are needed
        for offset in range(limit, tests_count, limit):
            offsets.append(offset)

        with ThreadPoolExecutor(max_workers=10) as executor:
            future_to_offset = {
                executor.submit(TestRail().get_tests,run_id, offset, limit): offset
                for offset in offsets
            }
            for future in as_completed(future_to_offset):
                cases = future.result()
                if cases:
                    total_tests += cases
        return total_tests
    
    # Fetch sections from the suite
    def get_all_sections(self):
        sections = []
        offset = 0
        while True:
            result = client.send_get(f"get_sections/{G_PROJECT_ID}&suite_id={G_SUITE_ID}&limit={PAGE_SIZE}&offset={offset}")
            sections +=result['sections']
            if len(result['sections']) < PAGE_SIZE:
                break
            offset +=PAGE_SIZE
        return sections 

    # Fetch cases from a section
    def get_cases_from_section(self,section_id):
        case =  client.send_get(f"get_cases/{G_PROJECT_ID}&suite_id={G_SUITE_ID}&section_id={section_id}")
        return case["cases"]

    # Recursive function to get all section IDs within a parent section
    def get_all_section_ids(self,sections, parent_id):
        section_ids = []
        for section in sections:
            if section['parent_id'] == parent_id:
                section_ids.append(section['id'])
                section_ids += self.get_all_section_ids(sections, section['id'])
                # section_ids.extend(get_all_section_ids(sections, section['id']))
        return section_ids

    def get_cases(self, offset=0, limit=PAGE_SIZE):
        result = client.send_get(f"get_cases/{G_PROJECT_ID}&suite_id={G_SUITE_ID}&limit={limit}&offset={offset}")
        return result['cases']
    
    def update_case(self, case_id, update_dict):
        url = 'update_case/%s' % int(case_id)
        response = client.send_post(url, update_dict)
        return response
   
    def update_cases_concurrently(self,updates_array, max_workers=1):
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {executor.submit(self.update_case, case['case_id'], case['update_dict']): case for case in updates_array}
            for future in as_completed(futures):
                case = futures[future]
                try:
                    result = future.result()
                    # results.append(result)
                except Exception as e:
                    print(f"Error updating case {case['case_id']}: {e}")
    
    def mass_add_results(self, run_id, results_dict):
        add_url = 'add_results/%s' % run_id
        client.send_post(add_url, results_dict)
        
    def get_user_by_email(self):
        try:
            # Get case details by case name
            users = client.send_get(f"get_user_by_email&email=ken.qi@dbschenker.com")
            print(users)
            return users
        except Exception as e:
            print(f"Error fetching case details: {e}")
            return None

    def get_milestones_by_name(self,milestone_name):
         milestones = []
         offset = 0
         while True:
               result = client.send_get(f"get_milestones/{G_PROJECT_ID}&limit={PAGE_SIZE}&offset={offset}")
               if len(result['milestones'])==0:
                  break
               milestones += result['milestones']
               offset += PAGE_SIZE
               if len(result) < PAGE_SIZE:
                  break
         index = next((index for index, item in enumerate(milestones) if item['name'] == milestone_name), -1)
         if index>0:
            return milestones[index]
         else:
            raise ValueError(f"milestone : {milestone_name} does not exists")
    
    '''
    Get tests by test run id
    '''
    def get_tests(self,run_id,offset=0,limit=250):
        result= client.send_get(f"get_tests/{run_id}&limit={limit}&offset={offset}")
        return result['tests']
    
    def create_test_run(self,suite_id,run_name,milestone_id,case_ids):
        run_dict = { 
            'suite_id': suite_id, 
            'milestone_id': milestone_id,
            'assignedto_id': G_ASSIGN_TO_ID, 
            'include_all': G_INCLUDE_ALL,
            'case_ids':case_ids
        }
        run_dict["name"] = run_name
        url = 'add_run/%s' % G_PROJECT_ID
        response = client.send_post(url, run_dict)
        return response['id']
    
    def get_test_run(self,run_name,milestone_id):
        all_runs = client.send_get(f"get_runs/{G_PROJECT_ID}&milestone_id={milestone_id}")
        all_runs = all_runs['runs']
        if len(all_runs)==0:
           return -1
        index = next((index for index, run in enumerate(all_runs) if run['name'] == run_name), None)
        if index is not None:
           return all_runs[index]['id']
            
    def update_test_run(self,run_id,data):
        client.send_post(f'update_run/{run_id}',data)
    
    
    def delete_testrun(self,run_id):
        client.send_post(f"delete_run/{run_id}")

    def delete_sections(self,section_id):
        url = f'delete_section/{section_id}'
        return client.send_post(url)
    
    def create_milestone(self,milestone_dict):
        url = 'add_milestone/%s' % G_PROJECT_ID
        response = client.send_post(url, milestone_dict)
        return response['id']

    
class CommonFunctions(object):
    """Shared helpers for fetching cases and importing parsed robot results."""

    def get_all_cases_in_sections_parallel(self):
        """Fetch every case of the configured suite, paging 250 at a time in parallel."""
        limit = 250
        # The first page doubles as the "are there more pages?" probe.
        total_cases = TestRail().get_cases(0, limit)
        if len(total_cases) < limit:
            return total_cases
        # The initial page was full: fan out over the remaining offsets.
        offsets = list(range(limit, MAX_NUMBERS['MAX_CASE_NUMBER'], limit))
        with ThreadPoolExecutor(max_workers=10) as executor:
            future_to_offset = {
                executor.submit(TestRail().get_cases, offset, limit): offset
                for offset in offsets
            }
            for future in as_completed(future_to_offset):
                cases = future.result()
                if cases:
                    total_cases += cases
        print(len(total_cases))
        return total_cases

    def get_all_automation_cases(self):
        """Return cases flagged as automated or carrying a robot script name."""
        all_cases = self.get_all_cases_in_sections_parallel()
        all_automations_cases = []
        for case in all_cases:
            is_robot = (isinstance(case['custom_robot'], int)
                        and case['custom_robot'] == ROBOT_STATUS['20 - Automated'])
            # BUGFIX: the original chained assignment
            # (`case['custom_robot_script_name']=None and ...`) overwrote the
            # field with None instead of testing it; compare the value instead.
            script_name = case['custom_robot_script_name']
            has_robot_name = script_name is not None and script_name != ''
            if is_robot or has_robot_name:
                all_automations_cases.append(case)
        return all_automations_cases

    def add_results_by_runid(self, run_id, results, tango_version, max_tests_count,
                             is_update_case: bool, is_set_author=False):
        """Add parsed *results* to run *run_id* in batches of 100, then optionally
        push per-case updates.

        Each result entry must provide: case_id, case_name, status_id, elapsed,
        defects, steps and (when *is_set_author*) author.
        """
        print(f'Start find test runs ,start at {datetime.now()}')
        tests = TestRail().get_all_tests_by_runid(run_id, max_tests_count)
        print(f'End find test runs,numbers are {len(tests)}, end at {datetime.now()}')
        new_results = []
        updates_array = []
        for result in results:
            test = next((t for t in tests if t['case_id'] == int(result['case_id'])), None)
            if test is None:
                print(f"{result['case_name']} can't be imported,do not have this test run")
                continue
            # 1. build the add_results payload entry
            add_result_dict = {
                'status_id': result['status_id'],
                'test_id': test['id'],
                # BUGFIX: the TestRail field is lowercase 'comment'; the
                # capitalised 'Comment' key was silently ignored by the API.
                'comment': 'Updated by automated scripts.',
                'custom_run_environment': '8',  ## test_environment: other
                'elapsed': result['elapsed'],
                'version': tango_version,
                'defects': result['defects'],  ## add defect id
                'assignedto_id': G_ASSIGN_TO_ID,
                'custom_step_results': result['steps'],
            }
            if is_set_author:
                add_result_dict['assignedto_id'] = FATHandling().user_mapping(result['author'])
            new_results.append(add_result_dict)
            # 2. flush every 100 results to keep request payloads small
            if len(new_results) > 99:
                TestRail().mass_add_results(run_id, {'results': new_results})
                new_results = []
            # 3. collect the per-case update (last execution/success/fail versions)
            update_dict = {
                'estimate': result['elapsed'],
                'automationresult': result['status_id'],
                'custom_robot_last_execution_version': tango_version,
                'custom_robot_last_success_version': tango_version if result['status_id'] == 1 else test['custom_robot_last_success_version'],
                'custom_robot_last_fail_version': tango_version if result['status_id'] == 5 else test['custom_robot_last_fail_version'],
            }
            updates_array.append({'case_id': result['case_id'], 'update_dict': update_dict})
        # flush the final partial batch
        if len(new_results) > 0:
            TestRail().mass_add_results(run_id, {'results': new_results})
        # mass update cases
        if is_update_case:
            TestRail().update_cases_concurrently(updates_array)

    ## remove untested tests after importing all the results
    def remove_untested_runs(self, run_name, milestone_id, max_tests):
        """Shrink the run to only the cases that actually have a result."""
        run_id = TestRail().get_test_run(run_name, milestone_id)
        # Guard against a missing run (get_test_run returns -1/None when absent).
        if run_id is None or run_id == -1:
            print(f"test run {run_name} not found, nothing to remove")
            return
        tests = TestRail().get_all_tests_by_runid(run_id, max_tests)
        # Keep every test whose status is not 3 (untested).
        filtered_data = [item for item in tests if item.get('status_id') != 3]
        if len(tests) > len(filtered_data):
            case_ids = [item['case_id'] for item in filtered_data if 'case_id' in item]
            # BUGFIX: update_run expects a JSON object ({'case_ids': [...]}),
            # not a bare list.
            TestRail().update_test_run(run_id, {'case_ids': case_ids})
class IntHandling(object):
    """Import INT daily/weekend robot results into TestRail."""

    def get_int_test_run(self, run_name, milestone_id, max_result_count):
        """Return the id of the reusable run *run_name*, or -1 when a new run is needed.

        When any test of the existing run has accumulated more than
        *max_result_count* results, the run is deleted and -1 is returned so the
        caller recreates it.
        """
        all_runs = client.send_get(f"get_runs/{G_PROJECT_ID}&milestone_id={milestone_id}")
        all_runs = all_runs['runs']
        if len(all_runs) == 0:
            return -1
        index = next((index for index, run in enumerate(all_runs) if run['name'] == run_name), None)
        if index is None:
            return -1
        run_id = all_runs[index]['id']
        results = client.send_get(f"get_results_for_run/{run_id}")
        if results['size'] == 0:
            return run_id
        # Sample the first test's result history to decide whether to recreate.
        test_ids = [item['test_id'] for item in results['results']]
        response = client.send_get(f"get_results/{test_ids[0]}")
        if response['size'] > max_result_count:
            TestRail().delete_testrun(run_id)
            print(f"Test Result Count > {max_result_count},re-create test run {run_name}")
            return -1
        return run_id

    def import_result(self, output, tango_version, run_name, suite_id, milestone_id,
                      max_result_count=G_INT_MAX_RESULT_COUNT):
        """Parse robot *output* and import it into the named INT run.

        Skips the import entirely when the pass rate is below 60%.
        """
        # 1. parse output; if pass rate is below 60%, do not import
        parsed_results = parse(output)
        # BUGFIX: guard the empty-result case to avoid ZeroDivisionError below.
        if not parsed_results:
            print("No results parsed from output, nothing to import")
            return
        passed_count = len([item for item in parsed_results if item.get('status_id') == 1])
        pass_rate = (passed_count / len(parsed_results)) * 100
        if pass_rate < 60:
            return
        case_ids = []
        for result in parsed_results:
            case_id = result['case_id']
            if case_id.isdigit() and int(case_id) > 0:
                case_ids.append(int(case_id))
            else:
                case_name = result['case_name']
                print(f"Case {case_name} id: {case_id} format is not right, will not import result")
        # 2. reuse the existing test run when possible, otherwise create a new one
        print(f'Find Test Run, start at {datetime.now()}')
        run_id = self.get_int_test_run(run_name, milestone_id, max_result_count)
        if run_id == -1:
            run_id = TestRail().create_test_run(suite_id, run_name, milestone_id, case_ids)
        print(f'Run id is {run_id},Run name is {run_name}')
        # 3. add the results
        print(f'Insert result and update test case,start at {datetime.now()}')
        start = datetime.now()
        CommonFunctions().add_results_by_runid(run_id, parsed_results, tango_version, len(case_ids), True)
        end = datetime.now()
        test_duration = (end - start).total_seconds()
        print(f'Insert Done,it costs {int(test_duration)}')

    def import_grit_result(self, output, tango_version, type):
        """Import a GRIT result into the daily or weekend run (*type*: 'DAILY'/'WEEKEND')."""
        if type == "DAILY":
            self.import_result(output, tango_version, RUN_NAMES['GRIT_Daily'], SUITE_IDS['GRIT_SUITE'], MILESTONE_IDS['Daily'])
        elif type == "WEEKEND":
            self.import_result(output, tango_version, RUN_NAMES['GRIT_Weekend'], SUITE_IDS['GRIT_SUITE'], MILESTONE_IDS['Weekend'])

    def import_eqp_result(self, output, tango_version, type):
        """Import an EQP result into the daily or weekend run (*type*: 'DAILY'/'WEEKEND')."""
        if type == "DAILY":
            self.import_result(output, tango_version, RUN_NAMES['EQP_Daily'], SUITE_IDS['EQP_SUITE'], MILESTONE_IDS['Daily'])
        elif type == "WEEKEND":
            self.import_result(output, tango_version, RUN_NAMES['EQP_Weekend'], SUITE_IDS['EQP_SUITE'], MILESTONE_IDS['Weekend'])

                        
        
class FATHandling(object):
    """Import FAT shakedown robot results into TestRail."""

    def user_mapping(self, user):
        """Map a robot author name to a TestRail user id (falls back to G_ASSIGN_TO_ID)."""
        # NOTE: the original dict listed 'lynn' twice with the same value; the
        # duplicate has been removed.
        user_dict = {
            'wendy': 1588, 'lynn': 858, 'bella': 145, 'fiona': 1586, 'nella': 1590,
            'terence': 1587, 'jane': 133, 'betty': 1589, 'arya': 1266,
            'momo': 1594, 'timothy': 1592, 'joe': 1593,
        }
        if user in user_dict:
            return user_dict[user]
        return G_ASSIGN_TO_ID

    def create_milestone(self, tango_version):
        """Return the '<version> FAT Automation' sub-milestone id, creating it if missing."""
        milestone = TestRail().get_milestones_by_name(tango_version)
        sub_milestones = milestone['milestones']
        fat_milestone = f"{tango_version} FAT Automation"
        index = next((index for index, item in enumerate(sub_milestones) if item['name'] == fat_milestone), -1)
        if index > -1:
            return sub_milestones[index]['id']
        print(f"milestone:{fat_milestone} not exists, will create one")
        milestone_dict = {
            "name": fat_milestone,
            "parent_id": milestone['id'],
            "description": "Milestone created by automation scripts"
        }
        return TestRail().create_milestone(milestone_dict)

    def import_shakedown_result(self, output, tango_version, run_name, suite_id):
        """Parse robot *output*, create a shakedown run and import the results."""
        # 1. parse output
        parsed_results = parse(output)
        case_ids = []
        for result in parsed_results:
            case_id = result['case_id']
            if case_id.isdigit() and int(case_id) > 0:
                case_ids.append(int(case_id))
            else:
                case_name = result['case_name']
                print(f"Case {case_name} id: {case_id} format is not right, will not import result")
        # 2. create test run
        run_id = self.create_shakedown_test_run(tango_version, suite_id, case_ids, run_name)
        # 3. add result
        print(f'Insert result and update test case,start at {datetime.now()}')
        start = datetime.now()
        CommonFunctions().add_results_by_runid(run_id, parsed_results, tango_version, len(case_ids), True, True)
        end = datetime.now()
        test_duration = (end - start).total_seconds()
        print(f'Insert Done,it costs {int(test_duration)}')

    ## all case ids come from the shakedown report, not from the case list
    def create_shakedown_test_run(self, version, suite_id, case_ids, run_name):
        """Create a '<version> <run_name>' run under the FAT milestone; return its id."""
        milestone_id = self.create_milestone(version)
        run_dict = {
            'suite_id': suite_id,
            'milestone_id': milestone_id,
            'assignedto_id': G_ASSIGN_TO_ID,
            'include_all': G_INCLUDE_ALL,
            'case_ids': case_ids,
            'name': version + " " + run_name,
        }
        url = 'add_run/%s' % G_PROJECT_ID
        response = client.send_post(url, run_dict)
        return response['id']

    def add_shakedown_result(self, output, tango_version, run_name):
        """Add parsed results to an already-existing shakedown run.

        Raises:
            ValueError: when the FAT milestone or the target run does not exist.
        """
        milestone = TestRail().get_milestones_by_name(tango_version)
        sub_milestones = milestone['milestones']
        fat_milestone = f"{tango_version} FAT Automation"
        index = next((index for index, item in enumerate(sub_milestones) if item['name'] == fat_milestone), -1)
        # BUGFIX: index -1 silently picked the LAST sub-milestone via negative
        # indexing; fail loudly instead.
        if index == -1:
            raise ValueError(f"milestone {fat_milestone} does not exist")
        milestone_id = sub_milestones[index]['id']
        all_runs = client.send_get(f"get_runs/{G_PROJECT_ID}&milestone_id={milestone_id}")
        all_runs = all_runs['runs']
        index = next((index for index, run in enumerate(all_runs) if run['name'] == f'{tango_version} {run_name}'), None)
        # BUGFIX: a missing run crashed with an opaque TypeError on all_runs[None].
        if index is None:
            raise ValueError(f"test run {tango_version} {run_name} does not exist")
        run_id = all_runs[index]['id']
        parsed_results = parse(output)
        CommonFunctions().add_results_by_runid(run_id, parsed_results, tango_version, 500, True, True)
    
class HotfixHandling(object):
    """Import hotfix robot results into TestRail."""

    def get_hotfix_milestone(self, tango_version):
        """Return the milestone id for *tango_version*, searching sub-milestones too.

        Raises:
            ValueError: when no matching milestone (or sub-milestone) exists.
        """
        milestones = []
        offset = 0
        while True:
            result = client.send_get(f"get_milestones/{G_PROJECT_ID}&limit={PAGE_SIZE}&offset={offset}")
            if len(result['milestones']) == 0:
                break
            milestones += result['milestones']
            offset += PAGE_SIZE
            # BUGFIX: compare the page length, not the key count of the response dict.
            if len(result['milestones']) < PAGE_SIZE:
                break
        index = next((index for index, item in enumerate(milestones) if item['name'] == tango_version), -1)
        # BUGFIX: index 0 is a valid match; only -1 means "not found" (was `index>0`).
        if index >= 0:
            # BUGFIX: return the id -- the sub-milestone branch and all callers
            # (create_hotfix_test_run, add_hotfix_result) expect an id, but this
            # branch previously returned the whole milestone dict.
            return milestones[index]['id']
        # Fall back to a parent milestone whose name prefixes the version, then
        # look for an exact match among its sub-milestones.
        index = next((index for index, item in enumerate(milestones) if tango_version.startswith(item['name'])), -1)
        if index >= 0:
            sub_milestones = milestones[index]['milestones']
            sub_index = next((i for i, item in enumerate(sub_milestones) if item['name'] == tango_version), -1)
            if sub_index >= 0:
                return sub_milestones[sub_index]['id']
        # BUGFIX: previously fell through returning None; fail loudly instead.
        raise ValueError(f"milestone {tango_version} does not exist")

    def import_hotfix_result(self, output, tango_version, suite_id, run_name, is_set_author):
        """Parse robot *output*, create a hotfix run and import the results."""
        # 1. parse output and collect the numeric case ids
        parsed_results = parse(output)
        case_ids = []
        for result in parsed_results:
            case_id = result['case_id']
            if case_id.isdigit() and int(case_id) > 0:
                case_ids.append(int(case_id))
            else:
                case_name = result['case_name']
                print(f"Case {case_name} id: {case_id} format is not right, will not import result")
        # 2. create test run
        run_id = self.create_hotfix_test_run(tango_version, suite_id, case_ids, run_name)
        # 3. add result
        print(f'Insert result and update test case,start at {datetime.now()}')
        start = datetime.now()
        CommonFunctions().add_results_by_runid(run_id, parsed_results, tango_version, len(case_ids), True, is_set_author)
        end = datetime.now()
        test_duration = (end - start).total_seconds()
        print(f'Insert Done,it costs {int(test_duration)}')

    ## all case ids come from the hotfix report, not from the case list
    def create_hotfix_test_run(self, version, suite_id, case_ids, run_name):
        """Create a hotfix run under the version's milestone and return its id."""
        milestone_id = self.get_hotfix_milestone(version)
        run_dict = {
            'suite_id': suite_id,
            'milestone_id': milestone_id,
            'assignedto_id': G_ASSIGN_TO_ID,
            'include_all': G_INCLUDE_ALL,
            'case_ids': case_ids,
            'name': run_name,
        }
        url = 'add_run/%s' % G_PROJECT_ID
        response = client.send_post(url, run_dict)
        return response['id']

    def add_hotfix_result(self, output, tango_version, run_name, is_set_author):
        """Add parsed results to an already-existing hotfix run named *run_name*.

        Raises:
            ValueError: when the run does not exist under the version milestone.
        """
        milestone_id = self.get_hotfix_milestone(tango_version)
        all_runs = client.send_get(f"get_runs/{G_PROJECT_ID}&milestone_id={milestone_id}")
        all_runs = all_runs['runs']
        index = next((index for index, run in enumerate(all_runs) if run['name'] == run_name), None)
        # BUGFIX: a missing run crashed with an opaque TypeError on all_runs[None].
        if index is None:
            raise ValueError(f"test run {run_name} does not exist")
        run_id = all_runs[index]['id']
        parsed_results = parse(output)
        CommonFunctions().add_results_by_runid(run_id, parsed_results, tango_version, 300, True, is_set_author)
        
def int_daily_import_grit(output, tango_version):
    """Import a GRIT robot result into the INT daily run."""
    handler = IntHandling()
    handler.import_result(
        output,
        tango_version,
        RUN_NAMES['GRIT_Daily'],
        SUITE_IDS['GRIT_SUITE'],
        MILESTONE_IDS['Daily'],
    )

def int_daily_import_eqp(output, tango_version):
    """Import an EQP robot result into the INT daily run."""
    handler = IntHandling()
    handler.import_result(
        output,
        tango_version,
        RUN_NAMES['EQP_Daily'],
        SUITE_IDS['EQP_SUITE'],
        MILESTONE_IDS['Daily'],
    )

def int_weekend_import_grit(output, tango_version):
    """Import a GRIT robot result into the INT weekend run."""
    handler = IntHandling()
    handler.import_result(
        output,
        tango_version,
        RUN_NAMES['GRIT_Weekend'],
        SUITE_IDS['GRIT_SUITE'],
        MILESTONE_IDS['Weekend'],
    )

def int_weekend_import_eqp(output, tango_version):
    """Import an EQP robot result into the INT weekend run."""
    handler = IntHandling()
    handler.import_result(
        output,
        tango_version,
        RUN_NAMES['EQP_Weekend'],
        SUITE_IDS['EQP_SUITE'],
        MILESTONE_IDS['Weekend'],
    )

def hotfix_import_eqp(env_type, output, tango_version):
    """Import an EQP hotfix result; FAT runs also map results to their authors."""
    is_fat = env_type == "FAT"
    base_name = RUN_NAMES['FAT_EQP_HOTFIX'] if is_fat else RUN_NAMES['INT_EQP_HOTFIX']
    full_run_name = tango_version + " " + base_name
    HotfixHandling().import_hotfix_result(
        output, tango_version, SUITE_IDS['EQP_SUITE'], full_run_name, is_fat)
    
def hotfix_import_grit(env_type, output, tango_version):
    """Import a GRIT hotfix result; FAT runs also map results to their authors."""
    is_fat = env_type == "FAT"
    base_name = RUN_NAMES['FAT_GRIT_HOTFIX'] if is_fat else RUN_NAMES['INT_GRIT_HOTFIX']
    full_run_name = tango_version + " " + base_name
    HotfixHandling().import_hotfix_result(
        output, tango_version, SUITE_IDS['GRIT_SUITE'], full_run_name, is_fat)

def add_hotfix_eqp_result(env_type, output, tango_version):
    """Add an EQP hotfix result to an existing run; FAT runs map results to authors."""
    is_fat = env_type == "FAT"
    base_name = RUN_NAMES['FAT_EQP_HOTFIX'] if is_fat else RUN_NAMES['INT_EQP_HOTFIX']
    full_run_name = tango_version + " " + base_name
    HotfixHandling().add_hotfix_result(output, tango_version, full_run_name, is_fat)
    
def add_hotfix_grit_result(env_type, output, tango_version):
    """Add a GRIT hotfix result to an existing run; FAT runs map results to authors."""
    is_fat = env_type == "FAT"
    base_name = RUN_NAMES['FAT_GRIT_HOTFIX'] if is_fat else RUN_NAMES['INT_GRIT_HOTFIX']
    full_run_name = tango_version + " " + base_name
    HotfixHandling().add_hotfix_result(output, tango_version, full_run_name, is_fat)
        
if __name__ == "__main__":
    # CLI: <module> <tango_version> <robot output file>
    module = sys.argv[1]
    tango_version = sys.argv[2]
    output = sys.argv[3]
    # Dispatch table keyed by the module argument; unknown values are a no-op,
    # exactly like the original if/elif chain.
    dispatch = {
        "EQP": lambda: FATHandling().import_shakedown_result(output, tango_version, RUN_NAMES['EQP_Shakedown'], SUITE_IDS['EQP_SUITE']),
        "GRIT": lambda: FATHandling().import_shakedown_result(output, tango_version, RUN_NAMES['GRIT_Shakedown'], SUITE_IDS['GRIT_SUITE']),
        "EQP_Add_Result": lambda: FATHandling().add_shakedown_result(output, tango_version, RUN_NAMES['EQP_Shakedown']),
        "GRIT_Add_Result": lambda: FATHandling().add_shakedown_result(output, tango_version, RUN_NAMES['GRIT_Shakedown']),
        "HOTFIX_FAT_EQP": lambda: hotfix_import_eqp("FAT", output, tango_version),
        "HOTFIX_FAT_GRIT": lambda: hotfix_import_grit("FAT", output, tango_version),
        "HOTFIX_FAT_EQP_Add_Result": lambda: add_hotfix_eqp_result("FAT", output, tango_version),
        "HOTFIX_FAT_GRIT_Add_Result": lambda: add_hotfix_grit_result("FAT", output, tango_version),
        "HOTFIX_INT_EQP_Add_Result": lambda: add_hotfix_eqp_result("INT", output, tango_version),
        "HOTFIX_INT_GRIT_Add_Result": lambda: add_hotfix_grit_result("INT", output, tango_version),
    }
    action = dispatch.get(module)
    if action is not None:
        action()
