"""
Case Type   : datakit录制回放工具
Case Name   : 批量删除录制回放任务
Create At   : 2025/3/27
Owner       : @lonely-dance
Description :
    1.导入服务器和实例
    2.创建源端目标端库
    3.创建多个任务
    4.批量删除任务
    5.清理环境
Expect      :
    1.成功
    2.成功
    3.成功
    4.成功
    5.成功
History     :
"""

import os
import re
import time
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.ComThread import ComThread
from testcase.utils.Logger import Logger
from testcase.utils.datakit_api.CommonApi import CommonApi
from testcase.utils.datakit_api.DataMigration import DataMigrationApi
from testcase.utils.CommonDatakit import CommonDatakit
from testcase.utils.CommonMySQL import CommonMySQL


class Tools(unittest.TestCase):
    """Datakit transcribe/replay tool: batch-delete recorded replay tasks.

    Flow (mirrors the case header):
        1. Import servers and instances into datakit.
        2. Create the source (MySQL) and target (openGauss, B-compat) databases.
        3. Create several transcribe-replay tasks.
        4. Batch-delete all tasks and verify the task list is empty.
        5. Clean up the environment in tearDown.
    """

    def setUp(self):
        """Prepare nodes, API clients, and per-case names/ids."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        # Source side (MySQL) and target side (openGauss primary) nodes;
        # the *Root* nodes are the OS-level users used for host import.
        self.mysql_node = Node('mysql5')
        self.mysql_root_node = Node('mysql5Root')
        self.primary_node = Node('PrimaryDbUser')
        self.primary_root_node = Node('PrimaryRoot')
        self.datakit_node = Node('Datakit')
        self.host_list = [self.primary_root_node, self.mysql_root_node]
        self.db_list = [self.mysql_node, self.primary_node]
        self.com_sh = CommonSH('PrimaryDbUser')
        # A valid datakit token is a hard precondition for every API call below.
        self.token = self.com_sh.get_token_value(macro.DATAKIT_PASSWD)
        self.log.info(self.token)
        self.assertNotEqual("error", self.token)
        self.com_api = CommonApi(self.token)
        self.datakit_sh = CommonDatakit(self.token)
        self.data_migration_api = DataMigrationApi(self.token)
        self.com_mysql = CommonMySQL('mysql5')
        # Task name prefix derived from the file name, e.g. "Case1234".
        # NOTE(review): assumes the file name contains "Case<digits>";
        # re.search(...).group() raises AttributeError otherwise.
        self.task_name = re.search(r'Case\d+', os.path.basename(__file__)).group()
        self.source_db = 'transcribe'
        self.target_db = 'replay_db'
        # Comma-separated list of created task ids, consumed by the batch delete.
        self.task_id = ''

    def test_transcribe_replay(self):
        """Create five transcribe-replay tasks, then batch-delete them."""
        text = '----step1:添加服务器 expect:成功----'
        self.log.info(text)
        self.datakit_sh.import_host(self.host_list)
        self.datakit_sh.import_db(self.db_list)

        text = '----step2:创建源端和目标端库----'
        self.log.info(text)
        # Recreate the MySQL source database from scratch.
        sql1 = f'drop database if exists {self.source_db}; create database {self.source_db};'
        self.log.info(sql1)
        res1 = self.com_mysql.execut_db_sql(sql1)
        self.log.info(res1)
        # Target database must be B-compatible; the replay maps the source db
        # name to a schema of the same name inside the target database.
        sql2 = f"""source {macro.DB_ENV_PATH};
        gsql -d postgres -p {self.primary_node.db_port} -c "drop database if exists {self.target_db};
                                      create database {self.target_db} with dbcompatibility 'B';"
        gsql -d {self.target_db} -p {self.primary_node.db_port} -c "CREATE SCHEMA {self.source_db}";
        """
        res2 = self.primary_node.sh(sql2).result()
        self.log.info(res2)

        text = '----step3:创建仅录制任务 expect:成功----'
        self.log.info(text)
        self.log.info('获取版本号')
        # Extract the datakit tool version (e.g. "7.0.0" or "7.0.0-RC1") from
        # the installed package name. Raw f-string: `\.` and `\1` must reach
        # sed literally (non-raw `\.` is an invalid Python escape sequence).
        cmd = f"cd {macro.DATAKIT_INSTALL_PATH};" \
              rf"ls | grep openGauss | sed -nE 's/.*openGauss-datakit-([0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+)?).*/\1/p'"
        version = self.datakit_node.sh(cmd).result()
        self.log.info(version)
        self.log.info('获取node_id')
        # Resolve source/target cluster node ids registered in datakit.
        response_source = self.data_migration_api.get_data_migration_resource('sourceClusters')
        source_node_id = response_source.json()['data']['sourceClusters'][0]['nodes'][0]['clusterNodeId']
        response_target = self.data_migration_api.get_data_migration_resource('targetClusters')
        target_node_id = response_target.json()['data']['targetClusters'][0]['clusterNodes'][0]['nodeId']
        task_ids = []
        for i in range(5):
            try:
                self.log.info('----创建录制任务----')
                data = {"taskName": f"{self.task_name}_{i}", "sourceDbType": "MySQL",
                        "sourceIp": f"{self.mysql_node.ssh_host}",
                        "sourcePort": f'{self.mysql_node.db_port}',
                        "sourceInstallPath": f"{macro.DB_MY_MYPATH}/transcribe", "targetIp": f"{self.primary_node.ssh_host}",
                        "targetPort": self.primary_node.db_port, "targetInstallPath": f"{macro.DB_MY_MYPATH}/replay",
                        "sourceUser": f"{self.mysql_root_node.ssh_user}",
                        "targetUser": f"{self.primary_root_node.ssh_user}", "dbMap": [f"{self.source_db}:{self.target_db}"],
                        "toolVersion": f"{version}",
                        "taskType": "transcribe_replay", "sourceNodeId": f"{source_node_id}",
                        "targetNodeId": f"{target_node_id}"}
                response = self.data_migration_api.post_plugins_data_migration_transcribeReplay_save(data)
                self.log.info(response.json())
                task_id = response.json()['data']
                # Full tool configuration: tcpdump capture on the source,
                # parallel replay against the target instance.
                data = {"sql.transcribe.mode": "tcpdump", "tcpdump.network.interface": "lo",
                        "tcpdump.capture.duration": 1,
                        "tcpdump.file.name": "tcpdump-file", "tcpdump.file.size": 10,
                        "tcpdump.database.ip": f"{self.mysql_root_node.ssh_host}",
                        "tcpdump.database.port": f"{self.mysql_node.db_port}", "queue.size.limit": 10000,
                        "packet.batch.size": 10000, "should.send.file": "true", "should.check.system": "false",
                        "max.cpu.threshold": 0.85, "max.memory.threshold": 0.85, "max.disk.threshold": 0.85,
                        "remote.receiver.name": f"{self.primary_root_node.ssh_user}",
                        "remote.node.ip": f"{self.primary_root_node.ssh_host}",
                        "remote.node.port": f"{self.primary_root_node.ssh_port}", "remote.retry.count": 1,
                        "result.file.size": 10, "parse.select.result": "false",
                        "sql.storage.mode": "json",
                        "sql.file.name": "sql-file", "sql.file.size": 10, "result.file.name": "select-result",
                        "parse.max.time": 0, "file.count.limit": 100, "sql.replay.strategy": "parallel",
                        "sql.replay.multiple": 1, "sql.replay.only.query": "false",
                        "sql.replay.parallel.max.pool.size": 5,
                        "sql.replay.slow.sql.rule": "2", "sql.replay.slow.time.difference.threshold": 1000,
                        "sql.replay.slow.sql.duration.threshold": 1000, "sql.replay.slow.top.number": 5,
                        "sql.replay.session.white.list": "[]", "sql.replay.session.black.list": "[]",
                        "sql.replay.database.ip": f"{self.primary_root_node.ssh_host}",
                        "sql.replay.database.port": f"{self.primary_root_node.db_port}",
                        "sql.replay.database.schema.map": [f"{self.source_db}:{self.target_db}"],
                        "sql.replay.database.username": f"{self.primary_root_node.db_user}",
                        "sql.replay.database.password": f"{self.primary_root_node.db_password}",
                        "sql.replay.draw.interval": 100, "replay.max.time": 0,
                        "source.time.interval.replay": "false", "compare.select.result": "false", "tcpdump.file.id": ""}
                response = self.data_migration_api.post_plugins_data_migration_transcribeReplay_downloadAndConfig(
                    task_id,
                    data)
                self.log.info(response.json())
                # Accumulate the id even if the visibility check below fails,
                # so the batch delete still covers every created task.
                task_ids.append(f'{task_id}')
                self.task_id = ','.join(task_ids)
                self.log.info('查看任务是否正常创建')
                response = self.data_migration_api.get_plugins_data_migration_transcribeReplay_list()
                self.log.info(response.json())
                self.assertIn(f"\"id\": {task_id}", response.text, '任务未创建成功')
            except AssertionError:
                # Best-effort creation: a task missing from the list is only
                # logged; step4's total==0 check is the authoritative assert.
                self.log.info(f'创建任务{i+1}失败')
        # Give datakit time to finish configuring the tasks before deleting.
        time.sleep(30)

        text = '----step4:批量删除录制回放任务 expect:成功----'
        self.log.info(text)
        # Batch delete takes the comma-separated id list built above.
        response = self.data_migration_api.post_plugins_data_migration_transcribeReplay_delete(self.task_id)
        self.log.info(response.json())
        self.assertEqual(200, response.json()['code'], '执行失败' + text)
        response = self.data_migration_api.get_plugins_data_migration_transcribeReplay_list()
        self.log.info(response.json())
        self.assertEqual(0, response.json()['total'], '执行失败' + text)

    def tearDown(self):
        """Step 5: remove imported hosts/instances and drop both databases."""
        text = '----step5:清理环境----'
        self.log.info(text)

        self.log.info('----删除服务器----')
        self.datakit_sh.delete_host()
        self.log.info('----删除实例----')
        self.datakit_sh.delete_db()

        self.log.info('----删除数据库----')
        sql1 = f'drop database if exists {self.source_db};'
        self.log.info(sql1)
        res1 = self.com_mysql.execut_db_sql(sql1)
        self.log.info(res1)
        sql2 = f'drop database if exists {self.target_db};'
        self.log.info(sql2)
        res2 = self.com_sh.execut_db_sql(sql2)
        self.log.info(res2)
        # Cleanup assertions run last so both drops are always attempted.
        self.assertNotIn('ERROR', res1, '删除数据库失败')
        self.assertNotIn('ERROR', res2, '删除数据库失败')
        self.log.info(f"-----{os.path.basename(__file__)} finsh-----")
