# -*- coding: utf-8 -*-
# @Time : 2025/6/26 14:04
# @FileName: 01.py
# @Target:
'''
使用 OpenAI 的方式调用 Ollama 服务
'''
import os, time, queue, copy, json, codecs
from pprint import pprint

import concurrent.futures

from openai import OpenAI
from ollama import AsyncClient


class Prompt:
    """Builders for the LLM prompts used in customer-feedback analysis.

    Prompts are built in methods (not hard-coded constants elsewhere) because
    the wording must stay swappable at runtime by the Java side.

    NOTE(fix): each builder previously returned ``f"..."``.format(...) — but an
    f-string is already interpolated, so the extra ``str.format`` call raised
    ``KeyError``/``ValueError`` whenever the user content contained ``{`` or
    ``}``. The templates are now returned directly.
    """

    def __init__(self):
        # Intentionally empty: no state yet, kept so prompts can later be
        # injected/overridden at construction time.
        ...

    def quality_prompt(self, model: str, content: str) -> str:
        """Prompt: extract, count and rank complaint reasons about *model*.

        :param model: car series / product name spliced into the prompt.
        :param content: raw customer-feedback text (may contain braces).
        :return: fully assembled prompt string.
        """
        return f"""
##任务目标
请从以下客户反馈中关于 {model} 提取产生抱怨的原因，统计每个原因的出现次数，按数量倒序排列，并总结产生抱怨的核心原因。  
##处理步骤
0. 完全基于客户反馈进行原因抽取，禁止添加新的内容和联想
1. 原因提取:从每条反馈中提取抱怨的核心原因并归类合并
2. 数量统计:统计每个原因在输入数据中出现的次数
3. 排序与占比计算:按数量从高到低排序
##输出格式要求
1. 原因列表
格式：`抱怨原因名称（数量：X次），有代表性的客户反馈`
2. 核心原因分析
对抱怨原因用简单话语简要解释说明
## 输入数据
{content}
"""

    def cuzu_negative_prompt(self, model: str, content: str) -> str:
        """Complaint-extraction prompt for the negative-sentiment flow.

        The template is byte-identical to :meth:`quality_prompt`, so we
        delegate instead of maintaining a second copy.
        """
        return self.quality_prompt(model=model, content=content)

    def cuzu_positive_prompt(self, model: str, content: str) -> str:
        """Prompt: extract, count and rank positive-sentiment reasons about *model*."""
        return f"""
##任务目标
请从以下客户反馈中关于 {model} 提取情感正向的原因，统计每个原因的出现次数，按数量倒序排列，并总结形成这些情感正向的核心原因。  
##处理步骤
0. 完全基于客户反馈进行原因抽取，禁止添加新的内容和联想
1. 原因提取:从每条反馈中提取情感正向的核心原因并归类合并
2. 数量统计:统计每个原因在输入数据中出现的次数
3. 排序与占比计算:按数量从高到低排序
##输出格式要求
1. 原因列表
格式：`情感正向原因名称（数量：X次），有代表性的客户反馈`
2. 核心原因分析
对情感正向原因用简单话语简要解释说明
## 输入数据
{content}
"""

    def data_unify(self, model: str, content_list: list, language_type: str) -> str:
        """Prompt: merge several per-chunk conclusions into one unified summary.

        :param model: car series / product name.
        :param content_list: per-chunk analysis results to be fused.
        :param language_type: language the model must answer in (e.g. '中文').
        :return: fully assembled reduce-step prompt.
        """
        # Number each chunk so the model can reference them individually.
        content = ""
        for idx, piece in enumerate(content_list, start=1):
            content += f"内容{idx}. \r\n{piece}\n"
        return f"""
##任务目标
请将以下关于{model}输入内容合并融合为一个统一结论，并以*{language_type}*语言输出
0. 完全基于输入内容进行汇总，禁止添加新的内容和联想
1. 合并输入内容语义相近的原因，合并原因的出现次数
2. 按出原因出现的次数倒序排序
3. 使用Markdown格式返回结果（标题+核心问题+客户反馈举例+结论）
##处理步骤
1. 语义归类：将语义重复或高度关联的原因合并
2.. 输出Markdown格式要求  
# 客户抱怨核心问题总结
## 核心问题
- **问题1**（问题数量）
- 客户原始反馈举例
## 结论
- 结论1
- 结论2
##输入内容
{content}
"""




class OllamaClient:
    """Client for an Ollama server via its OpenAI-compatible ``/v1`` endpoint.

    Provides plain/streaming chat completions, a thread-pooled fan-out over
    many prompts, and two map-reduce style feedback-analysis entry points
    (:meth:`analysis_batch`, :meth:`analysis_single`).
    """

    # One-pass deletion table for clean_content().
    # NOTE(fix): the original chained .replace() calls contained a no-op
    # ``replace('', '')`` where the left curly quote '“' was clearly intended
    # (the right quote '”' was stripped on the adjacent line).
    _STRIP_TABLE = str.maketrans('', '', '\n\r\t "“”{}[]')

    def __init__(self,
                 base_url="http://117.50.223.179:11434",
                 timeout=120,
                 model="qwen3:32b-q8_0",
                 temperature=0.7,
                 max_tokens=-1,
                 api_key="ollama",
                 ):
        """Configure both a sync OpenAI-compatible client and a native async client.

        :param base_url: Ollama server root (no trailing ``/v1``).
        :param timeout: request timeout in seconds, applied to both clients.
        :param model: default model name for all completions.
        :param temperature: default sampling temperature.
        :param max_tokens: default completion budget (-1 = server default).
        :param api_key: dummy key; Ollama ignores it but the SDK requires one.
        """
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens

        # The OpenAI-compatible API lives under the /v1 path on the server.
        self.client = OpenAI(
            base_url=f"{base_url}/v1",
            timeout=timeout,
            api_key=api_key,
        )
        # Native async client; constructed for future use, not used below.
        self.async_client = AsyncClient(
            host=base_url,
            timeout=timeout,
        )

        self.prompt = Prompt()

    def list_models(self):
        """Return the iterable of models available on the server."""
        return self.client.models.list()

    def chat_completion(self, messages, **kwargs):
        """Run one chat completion (optionally streaming).

        :param messages: OpenAI-style message dicts.
        :param kwargs: per-call overrides — ``stream``, ``top_p``,
            ``frequency_penalty``, ``presence_penalty``, and (new, defaulting
            to the instance values for backward compatibility) ``model``,
            ``temperature``, ``max_tokens``.
        :return: the SDK response object (or stream iterator when streaming).
        """
        params = {
            "model": kwargs.get("model", self.model),
            "messages": messages,
            "stream": kwargs.get("stream", False),
            "temperature": kwargs.get("temperature", self.temperature),
            "max_tokens": kwargs.get("max_tokens", self.max_tokens),
            "top_p": kwargs.get("top_p", 1.0),
            "frequency_penalty": kwargs.get("frequency_penalty", 0),
            "presence_penalty": kwargs.get("presence_penalty", 0),
        }
        return self.client.chat.completions.create(**params)

    def stream_chat(self, messages, **kwargs):
        """Yield content deltas from a streaming chat completion."""
        kwargs["stream"] = True
        response = self.chat_completion(messages, **kwargs)
        for chunk in response:
            if chunk.choices:
                delta = chunk.choices[0].delta
                if delta.content:
                    yield delta.content

    def multiprocessing_worker(self, task, success_queue, error_queue):
        """Thread-pool worker: run one completion, push the outcome onto a queue.

        :param task: dict whose ``'content'`` key is a fully assembled prompt.
        :param success_queue: receives ``{'model', 'messages', 'status_code', 'result'}``.
        :param error_queue: same shape on failure; never raises to the caller.
        """
        messages = [
            {"role": "user", "content": task['content']}
        ]
        try:
            response = self.chat_completion(messages)
            success_queue.put({
                'model': self.model,
                'messages': messages,
                'status_code': 200,
                'result': response.choices[0].message.content,
            })
        except Exception as e:
            # NOTE(fix): include the failing messages (the original reported
            # None, making failures impossible to reproduce from the log).
            error_queue.put({
                'model': self.model,
                'messages': messages,
                'status_code': 500,
                'result': str(e),
            })

    def process_tasks(self,
                      tasks,
                      max_workers):
        """Fan all tasks out over a thread pool and collect the results.

        :param tasks: list of prompt-task dicts (see multiprocessing_worker).
        :param max_workers: thread-pool size.
        :return: ``(success_results, error_results, elapsed_seconds)``.
            Result order is queue order, NOT the order of ``tasks``.
        """
        # NOTE(fix): ThreadPoolExecutor rejects max_workers=0, which is exactly
        # what the call sites pass (len(tasks)) when tasks is empty.
        if not tasks:
            return [], [], 0.0

        start_time = time.time()
        success_queue = queue.Queue()
        error_queue = queue.Queue()

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [
                executor.submit(
                    self.multiprocessing_worker,
                    task,
                    success_queue,
                    error_queue,
                )
                for task in tasks
            ]
            concurrent.futures.wait(futures)

        # Drain both queues; all workers have finished by this point.
        success_results = []
        while not success_queue.empty():
            success_results.append(success_queue.get())
        error_results = []
        while not error_queue.empty():
            error_results.append(error_queue.get())

        return success_results, error_results, time.time() - start_time

    def split_string(self, s: str) -> list:
        """Split *s* into at most 5 chunks of 5000 chars each.

        Anything beyond 25 000 characters is deliberately dropped to keep the
        per-prompt context bounded. An empty string yields an empty list.
        """
        chunk_size = 5000
        max_chunks = 5
        if not s:
            return []
        # Cap the covered length; the remainder is intentionally discarded.
        limit = min(len(s), chunk_size * max_chunks)
        return [s[i:i + chunk_size] for i in range(0, limit, chunk_size)]

    def clean_content(self, content: str) -> str:
        """Strip whitespace, quote and bracket characters in one C-level pass.

        Removing ``{``/``}``/``[``/``]`` also keeps stray braces out of later
        prompt assembly. (Replaces a chain of 11 ``.replace`` calls and a
        pointless ``copy.deepcopy`` of an immutable string.)
        """
        return content.translate(self._STRIP_TABLE)

    def parse_result(self, sucess_results, error_results):
        """Extract the ``'result'`` text of each success, dropping any
        ``<think>…</think>`` reasoning prefix emitted by the model.

        :param sucess_results: success dicts from :meth:`process_tasks`.
        :param error_results: accepted for interface compatibility; unused.
        :return: list of cleaned result strings.
        """
        outputs = []
        for success in sucess_results:
            result = success['result']
            if "</think>" in result:
                result = result.split('</think>')[1]
            outputs.append(result)
        return outputs

    def _build_prompt(self, type, model, content):
        """Dispatch an analysis *type* to its prompt builder.

        Raises ``ValueError`` for unsupported types — including
        ``'cuzu_neutral'``, which is intentionally not analyzed (previously
        this fell through and crashed later with an UnboundLocalError /
        ThreadPoolExecutor error).
        """
        builders = {
            'quality_analyze': self.prompt.quality_prompt,
            'cuzu_negative': self.prompt.cuzu_negative_prompt,
            'cuzu_positive': self.prompt.cuzu_positive_prompt,
        }
        if type not in builders:
            raise ValueError(f"unsupported analysis type: {type!r}")
        return builders[type](model=model, content=content)

    def _dedup_contents(self, data):
        """Clean each entry's ``'content'`` and join the unique ones with newlines.

        Uses dict.fromkeys so de-duplication preserves first-seen order
        (the original ``set`` made prompt ordering nondeterministic).
        """
        cleaned = [self.clean_content(d['content']) for d in data]
        return '\n'.join(dict.fromkeys(cleaned))

    def analysis_batch(self, data, type, language_type):
        """Map-reduce analysis over a large feedback set; yields answer chunks.

        ``data`` should cover a single car series / question, e.g.::

            [
                {'model': 'XXXXX', 'content': 'XXXXX'},
                {'model': 'XXXXX', 'content': 'XXXXX'},
                ...
            ]

        :param data: feedback entries; all assumed to share one 'model'.
        :param type: 'quality_analyze' | 'cuzu_negative' | 'cuzu_positive'.
        :param language_type: language for the final unified answer.
        :raises ValueError: for unsupported ``type`` values.
        """
        # All entries are assumed to belong to the first entry's model.
        model = data[0]['model']
        joined = self._dedup_contents(data)

        # Map step: one prompt task per text chunk, run in parallel.
        tasks = [
            {"role": "user", "content": self._build_prompt(type, model, chunk)}
            for chunk in self.split_string(s=joined)
        ]
        print('------')
        print(len(tasks))
        for task in tasks:
            print(len(task['content']))
        print('------')
        success_results, error_results, cost_time = self.process_tasks(tasks, len(tasks))

        # Reduce step: fuse the per-chunk results into one streamed answer.
        parse_result_list = self.parse_result(success_results, error_results)
        data_unify_prompt = self.prompt.data_unify(
            model=model,
            content_list=parse_result_list,
            language_type=language_type,
        )
        print('------')
        print(data_unify_prompt)
        print('------')
        yield from self.stream_chat([
            {"role": "user", "content": data_unify_prompt}
        ])

    def analysis_single(self, data, type, language_type):
        """Single-pass analysis of a small feedback set; yields answer chunks.

        Same ``data``/``type``/``language_type`` contract as
        :meth:`analysis_batch`, but the whole corpus goes into one prompt.
        :raises ValueError: for unsupported ``type`` values (previously this
        crashed with UnboundLocalError).
        """
        model = data[0]['model']
        joined = self._dedup_contents(data)

        content = self._build_prompt(type, model, joined)
        response = self.chat_completion(
            messages=[{"role": "user", "content": content}]
        )
        chat_completion_res = {
            'model': self.model,
            'status_code': 200,
            'result': response.choices[0].message.content,
        }

        parse_result_list = self.parse_result([chat_completion_res], [])
        data_unify_prompt = self.prompt.data_unify(
            model=model,
            content_list=parse_result_list,
            language_type=language_type,
        )
        yield from self.stream_chat([
            {"role": "user", "content": data_unify_prompt}
        ])




if __name__ == '__main__':
    # 打印Ollma服务器 117.50.223.179 支持的模型
    ollama = OllamaClient()
    A = ollama.list_models()
    for a in A:
        print(a.id, '  ', a.object, '   ', a.owned_by)

    # 测试质量相关的数据反馈结果，测试单个数据的显示的效果
    try_data = []
    with codecs.open(filename='01-quality-analyze-20250624.json',
                     mode='r', encoding='utf-8') as fr:
        for line in fr:
            line = json.loads(line)
            series = line['series']
            complaint_tags = line.get('complaint_tags', [])
            if len(complaint_tags) and series=='ID.3':
                for c in complaint_tags:
                    quality_3 = c['quality_3']
                    if '黑屏' in quality_3:
                        try_data.append({
                            'model' : 'ID.3',
                            'content' : ''.join(c['content_list']),
                        })
    print(len(try_data))
    pprint(try_data)
    response = ollama.analysis_single(try_data, 'quality_analyze', '英文')

    res_total = """"""
    for chunk in response:
        res_total += chunk
    print("单批数据测试 FINAL RESULT")
    print(res_total)
    print("单批数据测试 FINAL RESULT")


    print("------------------------------------------------------")
    # 测试大量数据进行运行时候的效果
    try_data = []
    with codecs.open(filename='01-quality-analyze-20250624.json',
                     mode='r', encoding='utf-8') as fr:
        for line in fr:
            line = json.loads(line)
            series = line['series']
            complaint_tags = line.get('complaint_tags', [])
            if len(complaint_tags) and series=='ID.3':
                for c in complaint_tags:
                    # quality_3 = c['quality_3']
                    # if '黑屏' in quality_3:
                        try_data.append({
                            'model' : 'ID.3',
                            'content' : ''.join(c['content_list']),
                        })
    print(len(try_data))
    pprint(try_data)
    response = ollama.analysis_batch(try_data, 'quality_analyze', '中文')

    res_total = """"""
    for chunk in response:
        res_total += chunk
    print("FINAL RESULT")
    print(res_total)
    print("FINAL RESULT")







