import logging
import os
import random
import threading
import time

from google.protobuf import json_format

from chat import ChatService

class ChatHttpClient(object):
    """
    Wrapper around ChatService that proxies attribute access to the underlying
    service.  Every method call is wrapped so that timing statistics are fired
    as Locust *request* events (one event each for time-to-first-token, whole
    request latency and tokens/second), which is what makes the calls show up
    in Locust's statistics.
    """

    def __init__(self, request_event, url, model_name, api_key='<API_KEY>', config=None):
        """
        Args:
            request_event: Locust ``environment.events.request`` hook to fire.
            url: endpoint URL of the model service.
            model_name: name of the model to query.
            api_key: optional API key (placeholder / empty string allowed).
            config: extra options forwarded to ChatService (e.g. max_tokens, use_uuid).
        """
        self.service = ChatService(url=url, model=model_name, api_key=api_key, wsid='', config=config)
        self._request_event = request_event

    @staticmethod
    def _new_request_meta(name, start_time):
        # One Locust request record; response/response_time/exception are
        # filled in by the caller before firing.
        return {
            "request_type": "trpc",
            "name": name,
            "start_time": start_time,
            "response_length": 0,  # calculating this for an xmlrpc.client response would be too hard
            "response": None,
            "context": {},  # see HttpUser if you actually want to implement contexts
            "exception": None,
            "response_time": None,
        }

    def __getattr__(self, name):
        # Only reached for attributes missing on the wrapper itself,
        # i.e. service methods such as send_stream.
        func = getattr(self.service, name)

        def wrapper(*args, **kwargs):
            start = time.time()
            t = time.perf_counter()
            try:
                content, first_time, all_time, prompt_tokens, completion_tokens, query_id, token_per_second = func(*args, **kwargs)
                logging.info(f'## REQUEST_STAT {"success"} {first_time} {all_time} {prompt_tokens} {completion_tokens} {query_id} {token_per_second}')

                meta_first = self._new_request_meta('first', start)
                meta_first["response"] = content
                meta_first["response_length"] = prompt_tokens
                meta_first["response_time"] = first_time

                meta_all = self._new_request_meta('whole', start)
                meta_all["response"] = content
                meta_all["response_length"] = completion_tokens
                meta_all["response_time"] = all_time

                meta_tps = self._new_request_meta('token_per_second', start)
                meta_tps["response_time"] = token_per_second

                # Firing the event is what makes the request get logged in Locust.
                self._request_event.fire(**meta_first)
                self._request_event.fire(**meta_all)
                self._request_event.fire(**meta_tps)
                # Bug fix: the original discarded the result, so callers always got None.
                return content
            except Exception as e:
                logging.exception('request error!')
                response_time = (time.perf_counter() - t) * 1000
                meta_error = self._new_request_meta('error', start)
                meta_error["response_time"] = response_time
                meta_error["exception"] = e
                self._request_event.fire(**meta_error)
                logging.info(f'## REQUEST_STAT {"error"} {0} {0} {0} {0} {"query_id"} {response_time}')
                return None

        return wrapper


from locust import User, task, FastHttpUser, events, tag
from locust.argument_parser import LocustArgumentParser
import os
import json
import logging

import model_config


# Guards environment.shared_data (file-load flag, data list, user counter)
# while multiple users start/stop concurrently.
DATA_LOCK = threading.Lock()



@events.init_command_line_parser.add_listener
def add_custom_arguments(parser: LocustArgumentParser):
    """Register this tool's custom command-line options on Locust's parser."""
    # (flag, type, choices, default, help) — choices and defaults come from model_config.
    option_specs = [
        ("--url", str, model_config.url_list, model_config.url, "模型地址url"),
        ("--file", str, model_config.data_file_list, model_config.data_file, "数据集"),
        ("--model_name", str, model_config.model_name_list, model_config.model_name, "模型名称"),
        ("--share_data", int, model_config.share_data_list, model_config.share_data,
         "不同并发是否共享数据，0：不共享，1：共享, 2: 共享且只跑一轮"),
        ("--sample_type", int, model_config.sample_type_list, model_config.sample_type,
         "数据采样规则，0：顺序，1：随机"),
        ("--use_uuid", int, model_config.use_uuid_list, model_config.use_uuid,
         "是否在请求头中添加uuid，0：不添加，1：添加"),
    ]
    for flag, arg_type, arg_choices, arg_default, help_text in option_specs:
        parser.add_argument(flag, type=arg_type, choices=arg_choices,
                            default=arg_default, help=help_text)
    # Options without a constrained choice list.
    parser.add_argument("--max_tokens", type=int, default=4096, help="最大输出token")
    parser.add_argument("--api_key", type=str, default="", help="api key，可空")



class LLmuser(User):
    """
    Locust user that replays chat requests from a data file against an LLM
    service through ChatHttpClient (which reports latency stats to Locust).

    Sharing modes (--share_data):
        0 - each user walks the shared data list by its own index
        1 - users rotate lines through a single shared list (pop head, re-append)
        2 - users consume the shared list once; the run quits when it is empty
            and every user has drained out
    """

    def __init__(self, environment):
        super().__init__(environment)
        self.env = environment
        # CLI options registered by add_custom_arguments.
        self.url = self.env.parsed_options.url
        self.file = self.env.parsed_options.file
        self.model_name = self.env.parsed_options.model_name
        self.share_data = self.env.parsed_options.share_data
        self.sample_type = self.env.parsed_options.sample_type
        self.api_key = self.env.parsed_options.api_key

        self.config = {
            'use_uuid': self.env.parsed_options.use_uuid,
            'max_tokens': self.env.parsed_options.max_tokens,
        }
        self.client = ChatHttpClient(request_event=environment.events.request,
                                     url=self.url,
                                     model_name=self.model_name,
                                     api_key=self.api_key,
                                     config=self.config)
        logging.info(f'running param, {self.url}, {self.file}, {self.model_name},  share_data: {self.share_data},'
                     f' sample_type: {self.sample_type}')

    def on_start(self):
        """Load the data file into environment-shared state (first user only)."""
        logging.info(f'## REQUEST_STAT user_start {time.time()}')
        with DATA_LOCK:
            if not hasattr(self.env, 'shared_data'):
                logging.info('init share data')
                self.env.shared_data = {
                    'read_file_flag': False,
                    'data_list': [],
                    'concurrency_num': 0,
                }
            logging.info('start: %s', self.env.shared_data['read_file_flag'])
            if self.env.shared_data['read_file_flag'] is False:
                self.env.shared_data['read_file_flag'] = True
                logging.info('read file')
                self.env.shared_data['data_list'] = []
                with open(self.file, 'r', encoding='utf8') as f_mix:
                    for line in f_mix:
                        # Skip blank lines so json.loads never sees an empty string.
                        if line.strip():
                            self.env.shared_data['data_list'].append(line)
                if self.sample_type == 1:
                    random.shuffle(self.env.shared_data['data_list'])
                    logging.info('data shuffle')
            if self.share_data == 2:
                # Count active users so the last one to drain can stop the run.
                self.env.shared_data['concurrency_num'] += 1

    def on_stop(self):
        logging.info(f'## REQUEST_STAT user_end {time.time()}')
        with DATA_LOCK:
            # Reset so a subsequent run re-reads the data file.
            self.env.shared_data['read_file_flag'] = False
            logging.info('stop...')

    @tag('mix')
    @task
    def grpc_client_task(self):
        """
        Endless request loop: pick the next line according to --share_data /
        --sample_type, parse it as a JSON message list and send it.
        """
        index = 0
        data_len = len(self.env.shared_data['data_list'])
        random_index = list(range(data_len))
        random.shuffle(random_index)
        while True:
            if self.share_data == 1:
                # Rotate: take from the head, put back at the tail.
                line = self.env.shared_data['data_list'].pop(0)
                self.env.shared_data['data_list'].append(line)
            elif self.share_data == 2:
                if len(self.env.shared_data['data_list']) == 0:
                    logging.info('data_list is empty')
                    with DATA_LOCK:
                        # NOTE(review): a user that loops here decrements more
                        # than once every 600s; works because the first user to
                        # reach 0 quits the whole runner — confirm intended.
                        self.env.shared_data['concurrency_num'] -= 1
                        logging.info(self.env.shared_data['concurrency_num'])
                        if self.env.shared_data['concurrency_num'] == 0:
                            logging.info('concurrency_num is 0, quit')
                            self.env.runner.quit()
                            break
                    time.sleep(600)
                    # Bug fix: previously fell through to json.loads(line) with
                    # 'line' possibly unbound, raising NameError.
                    continue
                else:
                    line = self.env.shared_data['data_list'].pop(0)
            elif self.sample_type == 1:
                # Per-user random order over the shared list (read-only access).
                line = self.env.shared_data['data_list'][random_index[index % data_len]]
                index += 1
            else:
                # Sequential order, wrapping around the list.
                line = self.env.shared_data['data_list'][index % data_len]
                index += 1
            messages = json.loads(line)
            if 'messages' in messages:
                messages = messages['messages']
            self.client.send_stream(messages=messages)
