# coding:utf-8
# -------------------------------------------------------------------------------
# @Create Time  : 2024/04/10 11:45
# @Author       : kony.xie
# @File Name    : execute.py
# @Python Ver.  : Python 3.8.10
# @Description  : 性能自动化测试平台的主程序
# -------------------------------------------------------------------------------

from base.models import Project, Scene, Result, Baseline
import time, datetime
import subprocess, os
from threading import Thread
from lib.mail import Mail
from lib.csv2html import CsvToHtml
from lib.runWorkers import run_locust
from shutil import copyfile
import inspect
import ctypes
from bs4 import BeautifulSoup
import logging

logging.basicConfig(
    level=logging.DEBUG,  # 设置日志级别为INFO，这样ERROR级别及以上的日志都会被记录
    filemode='a',  # 以追加模式打开日志文件
    filename='./log/django.log',
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'  # 设置日志格式
)
logger = logging.getLogger(__name__)

def stop_case(project, tid, exctype):
    tid = ctypes.c_long(tid)
    if not inspect.isclass(exctype):
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
    if res == 0:
        return {"code": 1, "msg": "Invalid thread identifier,please try again later.", "data": ""}
    elif res != 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
    order = f'ps -ef|grep ApacheJMeter|grep {project}|grep -v grep|awk \'{{print $2}}\''
    pid = subprocess.getoutput(order)
    print("pid: ", pid)
    if pid:
        order = f'netstat -nap|grep {pid}/java|grep udp6|awk \'{{print $4}}\'|awk -F ":" \'{{print $4}}\''
        port = subprocess.getoutput(order)
        print("port: ", port)
        if port:
            retcode = subprocess.call(["stoptest.sh", port])
    Project.objects.filter(project=project).update(status=0, ident=None)
    Scene.objects.filter(project=project).update(status=0)
    return {"code": 0, "msg": "success", "data": ""}

class Execute:
    def __init__(self, project):
        prj_conf = Project.objects.filter( project=project ).values( "mails", "workers", "engine", "status" )
        config = prj_conf[0]
        self.project = project
        self.workers = config["workers"]
        self.mails = config["mails"]
        self.engine = config["engine"]
        self.status = config["status"]
        self.count = self.workers.count(',')+1
        self.path = f'/test/performance/projects/{project}'
        self.logpath = f'/test/performance/projects/{project}/results/logs'
        self.basepath = f'/test/performance/Atp4P'

    def async_call(self, scenes, engine):
        t = Thread(target=self.execute, args=(scenes, engine))
        t.start()
        logger.info(f"Started the test of {self.project}, thread identifier is {t.ident}")
        Project.objects.filter(project=self.project).update(ident=t.ident)
     
    def run_case(self, model, scene_id=None):
        if self.status == 1:
            return f"{self.project} is running now"
        if model == 'project':
            scenes = Scene.objects.filter(project=self.project, status=0).values("id", "scene_type", "params", "threshold", "trans_name", "threads", "duration")
        elif model == 'scene':
            scenes = Scene.objects.filter(id=scene_id, status=0).values("id", "scene_type", "params", "threshold", "trans_name", "threads", "duration")
        self.async_call(scenes, self.engine)
        case_run = {"code": 0, "msg": "success", "data": ""}
        return case_run

    def execute(self, scenes, engine):
        # getcpu = GetCPU()     # Instantiate GetCPU, check whether it can be started, deprecated
        for case in scenes:         # Loop execution scenario
            scene_id = case['id']
            scene_type = case['scene_type']
            params = case['params']
            trans = case['trans_name']
            duration = case['duration']
            threads = case['threads']
            threshold = case['threshold']
            P = ''
            relt_path = f'{self.path}/results'
            copyfile('/test/performance/Atp4P/lib/mail.temp', f'{self.path}/mail.temp')
            # Change the working path, otherwise locust cannot read the parameterized file
            os.chdir( f'/test/performance/projects/{self.project}/data/{self.project}' )
            Project.objects.filter(project=self.project).update(status=1)
            Scene.objects.filter(id=scene_id).update(status=1)
            if scene_type == 'single':
                for thread in threads.split(','):
                    now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
                    filename = f'{trans}_{thread}vu_{duration}s_{now_time}'
                    if engine == 'jmeter':
                        for p in params.split():
                            P = P + ' -G' + p
                        if len(self.workers) == 0:
                            order = f'jmeter -J{trans}={thread} -Jduration={duration} {P} -n -t {self.path}/scripts/single.jmx -l {relt_path}/{filename}.jtl -j {self.logpath}/{filename}.log'
                            subprocess.getoutput( order )
                        else:
                            vu = int(int(thread) / self.count)
                            order = f'jmeter -G{trans}={vu} -Gduration={duration} {P} -R {self.workers} -n -t {self.path}/scripts/single.jmx -l {relt_path}/{filename}.jtl -j {self.logpath}/{filename}.log'
                            subprocess.getoutput(order)
                    elif engine == 'locust':
                        for p in params.split():
                            P = P + ' --' + p
                        if len(self.workers) == 0:
                            order = f'locust -f {self.path}/scripts/single.py --headless -u {thread} -r 50 -t {duration}s -T {trans} {P} --logfile={self.logpath}/{filename}.log --csv={relt_path}/{filename} >> {self.logpath}/{filename}.log 2>&1'
                            logger.info(order)
                            subprocess.getoutput( order )
                        else:
                            run_locust(self.project, scene_type, self.workers, P)
                            order = f'locust -f {self.path}/scripts/single.py --master --headless --expect-workers={self.count} -u {thread} -r 50 -t {duration}s -T {trans} {P} --logfile={self.logpath}/{filename}.log --csv={relt_path}/{filename} >> {self.logpath}/{filename}.log 2>&1'
                            logger.info( order )
                            subprocess.getoutput(order)
                    msg = self.data_proc(relt_path, filename, threshold, trans, engine, thread)
                    if msg == 'fail':
                        logger.info("stop the scene")
                        break
            elif scene_type == 'mixed' or scene_type == 'stability':
                tran = trans.split(',')
                for thread in threads.split(';'):
                    th = thread.split(',')
                    th_all = 0
                    for i in range(0,len(tran)):
                        th_all = int(th_all) + int(th[i])
                        vu = int(int(th[i]) / self.count)
                        P = f'{P} -G{tran[i]}={vu}'
                    for p in params.split():
                        P = P + ' -G' + p
                    now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
                    filename = f'{scene_type}_{th_all}vu_{duration}s_{now_time}'
                    order = f'jmeter -Gduration={duration} {P} -R {self.workers} -n -t {self.path}/scripts/{scene_type}.jmx -l {relt_path}/{filename}.jtl -j {self.logpath}/{filename}.log'
                    result = subprocess.getoutput(order)
                    if scene_type == 'mixed':
                        msg = self.data_proc(relt_path, filename, threshold, scene_type, engine, th_all)
                    if msg == 'fail':
                        logger.info("stop the scene")
                        break
            self.send_mail(f'{self.path}/mail.temp', scene_type, scene_id)
            time.sleep(30)

    def send_mail(self, content, title, scene_id=None):
        subject = f'{self.project}_{title}'
        mail = Mail()
        mail.send(content, subject, self.mails)
        if os.path.isfile(content):
            # If it's the mail.temp file, delete it
            os.remove(content)
        if scene_id is not None:
            Project.objects.filter(project=self.project).update(status=0)
            Scene.objects.filter(id=scene_id).update(status=0)

    # Processing data: Check if the error rate is too high, parse jtl files or csv files into html files
    def data_proc(self, relt_path, filename, threshold, trans, engine, thread):
        erate = 0
        html_file = f'{relt_path}/{filename}.html'
        if engine == 'jmeter':
            order = f'grep \'summary =\' {self.logpath}/{filename}.log|tail -n 1|awk -F "(" \'{{print $2}}\'|awk -F "%" \'{{print $1}}\''
            erate = subprocess.getoutput(order)
            if float(erate) > float(threshold):
                self.send_mail('There are too many errors, the subsequent scenario test has been stopped, and the previous test results have been sent by email', filename)
                order = f'java -jar {self.basepath}/lib/saxon-9.x.jar -s:{relt_path}/{filename}.jtl -o:{html_file} -xsl:{self.basepath}/lib/report2html_simple.xsl titleReport={filename}'
                os.system(order)
                msg = 'fail'        # Too many errors, set to fail
            else:
                # report2html.xsl有问题，提取不到responseData，估计是xml结果有问题，暂时无法解决
                order = f'java -jar {self.basepath}/lib/saxon-9.x.jar -s:{relt_path}/{filename}.jtl -o:{html_file} -xsl:{self.basepath}/lib/report2html.xsl titleReport={filename}'
                os.system(order)
                msg = 'success'
            # html appends to the temporary file for sending mail
            with open( html_file, 'r', encoding='UTF-8', errors='ignore' ) as source:
                content = source.read()
            with open(f'{self.path}/mail.temp', 'a+', encoding='UTF-8', errors='ignore') as target:
                target.write(content)
            # The test results file is recorded to the result table
            jtlFile = Result(project=self.project, tran_name=trans, file_name=f'{filename}.jtl', path=f'{relt_path}/{filename}.jtl')
            jtlFile.save()
            htmlFile = Result(project=self.project, tran_name=trans, file_name=f'{filename}.html', path=html_file)
            htmlFile.save()
        elif engine == 'locust':
            order = f'grep \'Aggregated\' {self.logpath}/{filename}.log|tail -n 2|head -1|awk -F "(" \'{{print $2}}\'|awk -F "%" \'{{print $1}}\''
            erate = subprocess.getoutput(order)
            inFile = f'{relt_path}/{filename}_stats.csv'
            html = CsvToHtml(inFile, html_file, filename)
            html.createReport()
            if float(erate) > float(threshold):
                self.send_mail('There are too many errors, the subsequent scenario test has been stopped, and the previous test results have been sent by email', trans)
                msg = 'fail'
            else:
                msg = 'success'
            # Reads html and adds it to temp for sending mail
            with open( html_file, 'r', encoding='UTF-8', errors='ignore' ) as source:
                content = source.read()
            with open(f'{self.path}/mail.temp', 'a+', encoding='UTF-8', errors='ignore') as target:
                target.write(content)
            # The test results file is recorded to the result table
            statsFile = Result(project=self.project, tran_name=trans, file_name=f'{filename}_stats.csv', path=inFile)
            statsFile.save()
            failuresFile = Result(project=self.project, tran_name=trans, file_name=f'{filename}_failures.csv', path=f'{relt_path}/{filename}_failures.csv')
            failuresFile.save()
            htmlFile = Result(project=self.project, tran_name=trans, file_name=f'{filename}.html', path=html_file)
            htmlFile.save()
        base_line = Baseline.objects.filter( project=self.project, tran_name=trans, thread=thread ).values("tps")
        TPS = base_line[0]["tps"] if base_line else None
        self.compare(html_file, TPS)
        return msg

    def isStop(self, html_file):
        # Used to check whether the cpu exceeds the threshold and is deprecated
        msg = 'success'
        order = f'grep "%<" {html_file}|grep "<td>"|awk -F "%" \'{{print $1}}\'|awk -F ">" \'{{print $2}}\''
        percent = subprocess.getoutput(order)
        if len(percent)==0:
            msg = 'fail'
        else:
            for p in percent.split('\n'):
                if float(p) > self.cpu_max:
                    self.send_mail('The CPU or memory usage exceeds the threshold. Subsequent scenario tests have been stopped. The previous test results have been sent by email', self.project)
                    msg = 'fail'
                    break
        return msg

    def compare(self, html_file, TPS):
        if TPS is not None:
            with open( html_file, 'r', encoding='UTF-8', errors='ignore' ) as source:
                html_content = source.read()
            soup = BeautifulSoup( html_content, 'lxml' )
            # Extract all table rows
            rows = soup.find_all( 'tr' )
            # Extract the summary module
            cells = rows[1].find_all( 'td' )
            if cells:
                qps_tps = cells[6].text.strip()
                if (float(TPS)-float(qps_tps))/float(TPS)>0.15:
                    self.send_mail(f'TPS is well below baseline, please check!\n{html_file}', self.project )
        else:
            pass