# -*- coding:utf-8 -*-

from typing import Dict, Union
import json
import hashlib
import time
import pathlib
import requests
import traceback
from .base_handler import BaseWebsocketHandler
from .base_handler import BaseRequestHandler
from ..config import mdb, slave_heartbeat_interval
from ..utility import db_data_tool
# from ..business.test_out_trigger_mail import send_mail


class TdpSlaveWebsocketHandler(BaseWebsocketHandler):
    """Websocket handler that manages slave (test executor) connections.

    A class-level registry maps each slave id to its live websocket handler
    and the timestamp of the last message received from it, so other parts
    of the server can push commands to any connected slave by id.
    """

    # Registry of connected slaves:
    # {slave_id: {'handler': websocket handler, 'last_letter_time': float}}
    slave: Dict[str,Dict[str, Union[BaseWebsocketHandler, int]]] = {}  # {slave_id: handler}

    @classmethod
    def check_slave_heartbeat(cls):
        """Ping every slave that has been silent for more than half of
        ``slave_heartbeat_interval`` seconds.

        Iterates over a copy of the registry keys because entries may be
        added/removed concurrently by connect/disconnect events.
        """
        t = time.time()
        for sid in cls.slave.copy().keys():
            d = cls.slave.get(sid)
            if d:
                if t - d['last_letter_time'] > slave_heartbeat_interval/2:
                    # NOTE(review): an empty method name is presumably treated
                    # by the slave as a plain heartbeat ping — confirm.
                    d['handler'].call_slave_method(sid, method='', data={'data': 'heartbeat'})

    @classmethod
    async def delete_slave(cls, sid):
        """Delete the slave record from the DB and, if it is currently
        connected, notify it and close its websocket.
        """
        # slave_group_id = self.get_argument('group_id')
        # status = (await self.mdb.query_one_by_dict(table='tdp_slave', col=('status',), where={'id': slave_id}))['status']
        # if status == 'off-line':  # if the slave is off-line, delete it from the table; otherwise only change its status
        await cls.mdb.delete_by_dict(table='tdp_slave', where={'id': sid})
        # else:
        #     await self.mdb.update_by_dict(table='tdp_slave', colv={'if_accept': 0}, where={'id': slave_id})
        if sid in cls.slave:
            cls.slave[sid]['handler'].write_message({'msg': '该执行机已被删除！'})
            cls.slave[sid]['handler'].close()
            cls.slave.pop(sid)

    @classmethod
    async def stop_task(cls, task_id, slave_id):
        """Forward a stop request for ``task_id`` to the slave running it.

        If the slave is not connected (KeyError on the registry) and its DB
        record is missing or marked off-line, mark the task stopped directly
        since no executor will ever report back.
        """
        try:
            cls.slave[slave_id]['handler'].call_slave_method(slave_id, method='stop_task', data={'task_id': task_id})
        except KeyError:
            s = (
                await cls.mdb.query_one_by_dict(table='tdp_slave', col=('status',), where={'id': slave_id})
            )
            if not s or s['status'] == 'off-line':
                await set_task_stop_status(task_id)

    async def register(self, **kwargs):
        """Register a connecting slave.

        A slave without a ``Sid`` header is brand new: a fresh id is derived
        from its hostname + current time and a DB record is inserted.  A
        slave that presents a ``Sid`` re-registers: its DB record is updated
        (keeping the display name already stored).  Either way the handler
        is stored in the class registry and a signed token is sent back via
        the ``re_register`` method call.
        """
        sid = kwargs.get('Sid', '')
        accept_user = kwargs.get('Tdp_user', '')
        ip = self.request.headers.get("X-Real-Ip", self.request.remote_ip)  # behind an nginx proxy the real client IP is in X-Real-Ip
        # NOTE(review): json.loads('') raises ValueError when the Host_info
        # header is absent — presumably the slave always sends it; confirm.
        host_info = json.loads(kwargs.get('Host_info', ''))
        slave_name = kwargs.get('slave_name', '')
        if not slave_name:
            slave_name = f"{host_info['hostname']}/{host_info['username']}"
        d = {
            'id': sid,
            'ip': ip,
            'hostname': host_info['hostname'],
            'username': host_info['username'],
            'slave_name': slave_name,
            'status': 'on-line',
            'accept_user': accept_user
        }
        if not sid:
            # First registration: derive a new slave id and persist it.
            sid = hashlib.md5(f"{host_info['hostname']}{str(time.time())}".encode(encoding='UTF-8')).hexdigest()
            d['id'] = sid
            if 'Sid' in self.request.headers:
                del(self.request.headers['Sid'])
                self.request.headers.add('Sid', sid)  # the Sid header is read again when the websocket disconnects
            await self.mdb.insert_by_dict(table='tdp_slave', data=d)
        else:
            # Re-registration: keep the slave_name already stored in the DB.
            d.pop('slave_name')
            if sid in self.slave:
                # NOTE(review): warns about a duplicate start but does not
                # return — the new connection still replaces the old handler
                # below; confirm this is intended.
                self.call_slave_method(sid, method='re_register', data={'msg': '请勿重复启动！'})
            if not (await self.mdb.query_one_by_dict(table='tdp_slave', where={'id': sid})):
                self.call_slave_method(sid, method='re_register', data={'msg': '不存在的执行机！请删除用户目录下的 .tdp 文件后重试！'})
                return
            else:
                await self.mdb.update_by_dict(table='tdp_slave', colv=d, where={'id': sid}, )

        self.slave[sid] = {}
        self.slave[sid]['handler'] = self
        self.slave[sid]['last_letter_time'] = time.time()
        token = self.encryption.token_enc({'sid': sid})
        self.call_slave_method(sid, method='re_register', data={'token': token, 'sid': sid})


async def select_waiting_task():
    """Dispatch waiting tasks to idle slaves.

    For every connected slave that accepts tasks and still has spare
    parallel capacity, atomically lock at most one due waiting task
    (matched by executor id or by the slave's executor groups), push it to
    the slave over the websocket, bump the slave's running counter, and
    mark the owning job as running.
    """
    # NOTE(review): the IN-clause is built by string interpolation, not as a
    # bound parameter — acceptable here only because slave ids come from the
    # server-side registry; confirm data2sql_in escapes its values.
    s = db_data_tool.data2sql_in(TdpSlaveWebsocketHandler.slave.keys())
    if s:
        maps_dlist = await mdb.query(
            '''
            select s.id as slave_id, mts2g.slave_group_id
            from tdp_slave s
              left join map_tdp_slave2group mts2g on s.id = mts2g.slave_id
            where s.id in %s and s.if_accept=1 and s.parallel_now<s.parallel_max
            ''' % s
        )  # idle slaves (accepting tasks, below their parallel limit) and their groups
        slave_list = db_data_tool.get_dlist_key_values(maps_dlist, 'slave_id')
        for sid in TdpSlaveWebsocketHandler.slave.copy().keys():
            if sid not in slave_list:
                continue
            group_list = db_data_tool.get_dlist_key_values(maps_dlist, 'slave_group_id', where={'slave_id': sid})
            # NOTE(review): `s` is reused here for the group IN-clause.  This
            # assumes data2sql_in returns '' (not None) for an empty input —
            # a None would leak the literal 'None' into the SQL below; confirm.
            s = db_data_tool.data2sql_in(group_list)
            if s:
                s = f'or t.executor_group_id in {s}'
            # Atomically claim one due, not-yet-expired waiting task for this
            # slave; queue_lock records the claimant so it can be re-read below.
            n = await mdb.execute(
                '''
                  update test_task t
                  set t.queue_lock=%s, t.status='lock', t.executor_id=%s
                  where t.status='wait' and t.start_time<=sysdate() and sysdate()-t.start_time<t.expire_time
                    and (t.executor_id=%s {s}) order by t.start_time limit 1;
                  '''.format(s=s), sid, sid, sid
            )
            if n:
                # Re-read the task we just locked (queue_lock == sid) together
                # with the status of its owning job.
                task_info = (
                    await mdb.query(
                        "select t.*, j.status as job_status from test_task t, test_job j "
                        "where t.queue_lock=%s and t.status='lock' and t.job_id=j.id "
                        "ORDER BY t.start_time DESC", sid)
                )[0]
                TdpSlaveWebsocketHandler.slave[sid]['handler'].call_slave_method(
                    sid, method='newAutotestTask', data=task_info
                )
                await mdb.execute('update tdp_slave set parallel_now=parallel_now+1 where id=%s', sid)
                # Stamp the job's run_time only on its first task.
                if task_info['job_status'] == 'wait':
                    s = ', j.run_time=now() '
                else:
                    s = ''
                await mdb.execute(
                    '''update test_job j, test_task t set j.status='running'{s}
                    where t.id=%s and j.id=t.job_id'''.format(s=s), task_info['id']
                )


class TdpSlaveHandler(BaseRequestHandler):
    """HTTP API used by slaves (test executors) to pull task data and to
    push execution logs and results back to the server.
    """

    async def get_conf(self):
        """Placeholder for a slave configuration endpoint (not implemented)."""
        pass

    async def get_taskInfo(self):
        """Assemble everything a slave needs to run one task.

        Collects the task's cases, their steps, all snippet cases with their
        steps, the web-frame elements referenced by those steps, plus the
        merged test data and exec config of the owning plan/set, then marks
        the task as running.

        Query args: ``testset_id``, ``task_id``.
        Raises:
            Exception: when the test set bound to the task contains no cases.
        """
        testset_id = self.get_argument('testset_id')
        task_id = self.get_argument('task_id')
        case = await self.mdb.query(
            '''
select CAST(tc.id AS CHAR) AS case_id, CAST(tc.id AS CHAR) AS id, tc.case_title as title , tc.case_type as `condition`
, tc.mark as remark, 'tester' as designer, CAST(rcase.id AS CHAR) AS result_case_id, tc.case_num, mcs.seq_num
from test_case tc, map_test_case2set mcs, test_task tk, test_result_set rset, test_result_case rcase
where mcs.set_id=%s and mcs.case_id=tc.id and tk.testset_id=mcs.set_id and tk.id=%s and rset.task_id=tk.id
and rcase.result_set_id=rset.id and rcase.case_id=tc.id
''', testset_id, task_id
        )
        case_id_list = db_data_tool.get_dlist_key_values(case, 'case_id')

        s = db_data_tool.data2sql_in(case_id_list)
        if not s:
            self.exception_msg = '测试集为空'
            raise Exception('测试集为空')
        step = await self.mdb.query(
            '''
select CAST(tstep.id AS CHAR) AS step_id, tstep.step_name, CAST(tstep.case_id AS CHAR) AS case_id, tstep.seq_num, 
kw.en_name as keyword, ctl.en_name as control, pg.node_name as page, tstep.code, 
CAST(te.id AS CHAR) AS element, tstep.default_data as data, tstep.expected_result as expected
, tstep.export_data as output, tstep.mark as remark, CAST(tstep.snippet_id AS CHAR) AS snippet_id, tstep.snippet_setting, 
tstep.type as platformName, tstep.type, CAST(snippet.id AS CHAR) AS snippet_name
from test_step tstep
  left join test_keyword ctl on tstep.control_id = ctl.id
  left join test_keyword kw on tstep.keyword_id = kw.id
  left join test_element te on tstep.element_id = te.id
  left join test_element_node pg on te.page_id = pg.id
  left join test_case snippet on snippet.id=tstep.snippet_id
where tstep.case_id in {s};
'''.format(s=s)
        )
        # snippet_id_list = db_data_tool.get_dlist_key_values(step, 'snippet_id')
        snippet = []
        # if snippet_id_list:
        # All snippet cases are shipped to the slave, not only those the
        # current steps reference.
        snippet = await self.mdb.query(
            '''
              select CAST(tc.id AS CHAR) AS case_id, CAST(tc.id AS CHAR) AS id, tc.case_title as title , 
                tc.case_type as `condition`, tc.mark as remark, tu.user_name as designer, tc.case_num 
              from test_case tc 
              left join tdp_user tu on tc.creator_id=tu.id 
              where tc.case_type='snippet'
              '''
        )
        if snippet:
            # Append the snippet cases' own steps to the step list.
            s = db_data_tool.data2sql_in(db_data_tool.get_dlist_key_values(snippet, 'case_id'))
            step += await self.mdb.query(
                '''
select CAST(tstep.id AS CHAR) AS step_id, tstep.step_name, CAST(tstep.case_id AS CHAR) AS case_id, tstep.seq_num
, kw.en_name as keyword, ctl.en_name as control, tstep.code
, pg.node_name as page, CAST(te.id AS CHAR) AS element, tstep.default_data as data, tstep.expected_result as expected
, tstep.export_data as output, tstep.mark as remark, tstep.snippet_id, tstep.snippet_setting, tstep.type as platformName
, snippet.case_num as snippet_name, tstep.type
from test_step tstep
  left join test_keyword ctl on tstep.control_id = ctl.id
  left join test_keyword kw on tstep.keyword_id = kw.id
  left join test_element te on tstep.element_id = te.id
  left join test_element_node pg on te.page_id = pg.id
  left join test_case snippet on snippet.id=tstep.snippet_id
where tstep.case_id in {s};'''.format(s=s)
            )
        # Fetch the elements referenced by any collected step, plus every
        # web_frame element (always needed to resolve frame paths).
        s = db_data_tool.data2sql_in(db_data_tool.get_dlist_key_values(step, 'step_id'))
        if s:
            s = '(ts.id in {s} and ts.element_id=te.id) or '.format(s=s)
        element = await self.mdb.query(
            '''
select CAST(te.id AS CHAR) AS element_id, CAST(te.element_name AS CHAR) AS element, te.by_value as value, te.mark as remark
, pg.node_name as page, location_by.en_name as `by`, CAST(frame.id AS CHAR) as custom, CAST(frame.id AS CHAR) AS frame_id, te.type, 
frame_pg.node_name as frame_page
from test_element te
  left join test_element_node pg on te.page_id=pg.id
  left join test_keyword location_by on te.location_by_id = location_by.id
  left join test_element frame on te.frame_id=frame.id
  left join test_element_node frame_pg on frame.page_id = frame_pg.id
, test_step ts
where {s} te.type='web_frame' group by te.id;
'''.format(s=s),
        )

        # def merge_frame(ddlist: dict, frame_id) -> list:
        #     """递归找frame路径"""
        #     el = ddlist[frame_id]
        #     if not el['frame_id']:
        #         return [el]
        #     else:
        #         return [el] + merge_frame(ddlist, el['frame_id'])
        #
        # frames_dlist = await self.mdb.query(
        #     '''select e.id, e.by_value, e_by.en_name as `by`, e.frame_id, tpage.node_name as page_name
        #     from test_element e
        #       left join test_keyword e_by on e.location_by_id = e_by.id
        #       left join test_element_node tpage on e.page_id = tpage.id
        #     where e.type='web_frame'
        #     '''
        # )
        # element_frames_ddict = {}
        # frame_id_list = db_data_tool.get_dlist_key_values(element, 'frame_id')
        # frames_ddict = db_data_tool.dict_list2ddict(frames_dlist, 'id')
        # for frame_id in frame_id_list:
        #     element_frames_ddict[frame_id] = merge_frame(frames_ddict, frame_id)
        #     element_frames_ddict[frame_id].reverse()
        testdata = await self.mdb.query_one(
            '''
            select ms2p.testdata_set, tp.testdata_plan, tp.exec_config_plan
            from map_test_set2plan ms2p, test_task tk, test_plan tp
            where tk.id=%s and ms2p.map_id=tk.map_set2plan_id and tp.id=ms2p.plan_id
            ''', task_id
        )
        if testdata.get('testdata_set'):
            testdata_set = json.loads(testdata['testdata_set'])
        else:
            testdata_set = {}
        if testdata.get('testdata_plan'):
            testdata_plan = json.loads(testdata['testdata_plan'])
        else:
            testdata_plan = {}
        if testdata.get('exec_config_plan'):
            exec_config = json.loads(testdata['exec_config_plan'])
        else:
            exec_config = {}
        # Set-level test data overrides plan-level values.
        testdata_plan.update(testdata_set)
        data = {
            'case': sorted(case, key=lambda x: x['seq_num']) + snippet,
            'step': step,
            'testdata_define': testdata_plan,
            'exec_config': exec_config,
            'element': element,
            # 'frame': element_frames_ddict,
            'result_set_id': (await self.mdb.query_one_by_dict(
                table='test_result_set', col=('id',), where={'task_id': task_id}))['id']
        }
        self.success_data = data
        await self.mdb.execute("update test_task set status='running', run_time=now() where id=%s", task_id)

    async def post_autotestLog(self):
        """Receive log/result callbacks from a slave.

        The ``type`` query argument selects the payload kind:
        - ``run_log``: append a log line to the result set (and, when
          ``result_case_id`` is given, to the result case).
        - ``run_log_snapshot``: store an uploaded screenshot under the job's
          static directory and append a link/img log entry.
        - ``case_result``: record one case's result and timestamps.
        - ``task_result``: mark the task completed, and either close the job
          (all tasks done) or wake up the chained next task.
        """
        # Fixed annotation: `str or dict` evaluates to just `str` at runtime.
        data: Union[str, dict] = self.request_json
        task_id = self.get_argument('task_id')
        dtype = self.get_argument('type')
        result_case_id = self.get_argument('result_case_id', None)
        if dtype == 'run_log':
            await self.mdb.execute(
                "update test_result_set set log=json_array_append(log, '$', %s) where task_id=%s", data, task_id)
            if result_case_id:
                await self.mdb.execute(
                    "update test_result_case set log=json_array_append(log, '$', %s) where id=%s", data, result_case_id
                )
        elif dtype == 'run_log_snapshot':
            file = self.request.files['file'][0]
            file_name = file['filename']
            file_name_show = self.get_argument('file_name_show')
            log = f'{file_name_show}:'
            await self.mdb.execute(
                "update test_result_case set log=json_array_append(log, '$', %s) where id=%s", log, result_case_id
            )
            log = f'<a href="./{file_name}"><img src="./{file_name}" alt="{file_name_show}" width=50%/></a>'
            await self.mdb.execute(
                "update test_result_case set log=json_array_append(log, '$', %s) where id=%s", log, result_case_id
            )
            job_id = (
                await self.mdb.query_one('select job_id from test_task where id=%s', task_id)
            )['job_id']
            path = pathlib.Path(self.config.job_static_path, str(job_id))
            path.mkdir(parents=True, exist_ok=True)
            body = file['body']
            with open(pathlib.Path(path, file_name), 'wb') as f:
                f.write(body)
        elif dtype == 'case_result':
            result_case_id = data['result_case_id']
            start_time = data.get('start_timestamp', '')
            end_time = data.get('end_timestamp', '')
            d = {
                'result': data['result']
            }
            if start_time:
                d['start_time'] = int(start_time)
            if end_time:
                d['end_time'] = int(end_time)

            await self.mdb.update_by_dict(table='test_result_case', colv=d, where={'id': result_case_id})
        elif dtype == 'task_result':
            # Complete the task and free one parallel slot on its executor.
            await self.mdb.execute(
                '''
                update test_task t, tdp_slave s 
                set t.status='completed', t.completed_time=now(), s.parallel_now=s.parallel_now-1 
                where t.id=%s and t.status='running' and s.id=t.executor_id 
                '''
                , task_id
            )
            # Count the job's finished tasks vs. its total tasks.
            d = await self.mdb.query_one(
                '''
select j.id, j.create_way, j.out_trigger_id, ifnull(task_completed_c.task_completed_count, 0) as task_completed_count
  , ifnull(task_c.task_count, 0) as task_count, t.next_task_id
from test_job j
left join test_task t on t.id=%s and j.id = t.job_id
left join (select job_id, count(1) as task_completed_count from test_task 
  where status in('completed', 'stop') group by job_id) task_completed_c
    on t.job_id = task_completed_c.job_id
left join (select job_id, count(1) as task_count from test_task group by job_id) task_c
    on t.job_id = task_c.job_id
where j.id=t.job_id
''', task_id
            )
            job_id = d['id']
            if d['task_completed_count'] == d['task_count']:
                time_s = time.strftime("%Y-%m-%d %H:%M:%S")
                await self.mdb.execute(
                    "update test_job set status='completed', completed_time=%s where id=%s and status='running'"
                    , time_s, job_id
                )
                # if d['create_way'] == 'out_trigger':
                #     trigger_conf = await self.mdb.query_one(
                #         'select * from test_out_trigger_conf where id=%s', d['out_trigger_id']
                #     )
                #     mail_sendee = json.loads(trigger_conf['mail_sendee'])
                #     post_url = trigger_conf['post_url']
                #     action_on_off = trigger_conf['mail_on_off'].split('|')
                #     if 'mail' in action_on_off and mail_sendee:
                #         # -------------- 触发测试完成发送测试报告邮件 --------------
                #         from .test_job_manager_handler import get_result
                #         report_dt = (await get_result(job_id))[0]
                #         fail_case = await self.mdb.query(
                #             "SELECT * FROM test_result_case WHERE job_id=%s AND result NOT IN ('success', '') ",
                #             job_id
                #         )
                #         fail_case_list = [c['case_name'] for c in fail_case]
                #         if not fail_case_list:
                #             fail_case_list = ['无']
                #         body_data = {
                #             "report_url": f'http://{self.request.host}/test/job?c=jobReport&job_id={job_id}&openWay=alone',
                #             "text_当前日期": time.strftime('%Y-%m-%d'),
                #             "text_前台_用例总数": report_dt['testAll'],
                #             "text_前台_成功总数": report_dt['testPass'],
                #             "text_前台_失败总数": report_dt['testFail']+report_dt['testSkip'],
                #             "text_前台_通过率": "%.2f%%" % (report_dt['testPass'] / report_dt['testAll'] * 100),
                #             "text_前台_开始执行时间": report_dt['beginTime'],
                #             "text_前台_结束执行时间": time_s,
                #             "text_前台_总耗时": report_dt['totalTime'],
                #             "text_失败场景": fail_case_list}
                #         try:
                #             send_mail(email_receiver=json.loads(trigger_conf['mail_sendee']), body_data=body_data)
                #             self.app_logger.info(f'自动化触发执行测试报告邮件发送成功，trigger_conf:{trigger_conf}')
                #         except:
                #             self.app_logger.error(f'自动化触发执行测试报告邮件发送失败，trigger_conf:{trigger_conf}')
                #             self.app_logger.error(traceback.format_exc())
                #     else:
                #         self.app_logger.warning(f'邮件开关为关闭状态或收件人为空，不进行邮件发送！job={job_id}，trigger_conf:{trigger_conf}')
                #     if 'cmp' in action_on_off and post_url:
                #         # -------------- 回调cmp发送测试结果 --------------
                #         from .test_job_manager_handler import get_result
                #         report_dt = (await get_result(job_id))[0]
                #         tmp = await self.mdb.query_one(
                #             'select flowBuildId, flowTaskBuildId from test_out_trigger_rec where job_id=%s', job_id
                #         )
                #         flowBuildId = tmp['flowBuildId']
                #         flowTaskBuildId = tmp['flowTaskBuildId']
                #         test_result = 1 if report_dt['testPass']>0 else 2  # 1-成功 2-失败
                #         d = {
                #             "flowTaskBuildId": flowTaskBuildId,
                #             "flowInstanceTestReport": {
                #                 "flowBuildId": flowBuildId,
                #                 "testType": 1,
                #                 "testNum": report_dt['testAll'],
                #                 "testSuccess": report_dt['testPass'],
                #                 "testFailed": report_dt['testAll'] - report_dt['testPass'],
                #                 "reportResult": test_result
                #             }
                #         }
                #         try:
                #             rsp = requests.post(post_url, json=d)
                #             self.app_logger.info(
                #                 f'自动化触发执行测试结果回调发送，调用返回：{rsp.json()}, '
                #                 f'测试任务jobID={job_id}，trigger_conf:{trigger_conf},'
                #                 f'post_data:{d}'
                #             )
                #         except:
                #             self.app_logger.error(
                #                 f'自动化触发执行测试结果回调发送失败，job={job_id}，trigger_conf:{trigger_conf},'
                #                 f'post_data:{d}'
                #             )
                #             self.app_logger.error(traceback.format_exc())
                #     else:
                #         self.app_logger.warning(f'cmp回调开关为关闭状态或post_url为空，不进行回调发送！job={job_id}，trigger_conf:{trigger_conf}')
            else:
                # Wake up the chained next task.
                # Fixed: this UPDATE was previously issued via query_one();
                # execute() matches every other write statement in this file.
                await self.mdb.execute(
                    '''
                    update test_task next_task, test_task t2
                      set next_task.`status`='wait'
                    where t2.id=%s and next_task.id=t2.next_task_id AND next_task.`status`='hang'
                    ''', task_id
                )

    async def patch_stopTaskStatus(self):
        """Confirm a stop request: flip the task from 'stopping' to 'stopped'
        and close the owning job if all its tasks are finished.
        """
        task_id = self.get_argument('task_id')
        await set_task_stop_status(task_id)


# Closes a job once every one of its tasks has reached a terminal state
# ('stopped', 'completed' or 'stopping').
sql_set_job_stop_status = '''
        update test_job j, test_task t
          left join (select job_id, count(1) as count_stopped from test_task
          where status in('stopped', 'completed', 'stopping') group by job_id) c_task_stopped
            on c_task_stopped.job_id=t.job_id
          left join (select job_id, count(1) as count_all from test_task group by job_id) c_task_all
            on c_task_all.job_id=t.job_id
        set j.status='stopped', j.completed_time=now()
        where t.id=%s and j.id=t.job_id and c_task_stopped.count_stopped=c_task_all.count_all
        '''


async def set_task_stop_status(task_id):
    """Finalize a stop request for ``task_id``.

    First flips the task itself from 'stopping' to 'stopped' and frees one
    parallel slot on its executor, then runs ``sql_set_job_stop_status`` to
    close the owning job if none of its tasks is still active.
    """
    stop_task_sql = (
        "update test_task t, tdp_slave s "
        "set t.status='stopped', t.completed_time=now(), s.parallel_now=s.parallel_now-1 "
        "where t.id=%s and t.status='stopping' and s.id=t.executor_id"
    )
    await mdb.execute(stop_task_sql, task_id)
    await mdb.execute(sql_set_job_stop_status, task_id)

