from openai import OpenAI
from modelscope import AutoModelForCausalLM, AutoTokenizer
import os

from local_db_manager import LocalDBManager

# Cache directory for ModelScope model downloads (Windows-style path).
os.environ['MODELSCOPE_CACHE'] = 'E:/Dept/Python/Model/'

# SECURITY: avoid committing a real token to source control. Prefer the
# MODELSCOPE_SDK_TOKEN environment variable; the literal below is kept only
# as a backward-compatible fallback — replace it with your own ModelScope
# SDK token.
MODELSCOPE_SDK_TOKEN = os.environ.get(
    "MODELSCOPE_SDK_TOKEN",
    "1903fa90-726a-4021-9bb7-0f21a0e8b0ab",
)


class OpenAIService:
    """Streams chat completions from the ModelScope OpenAI-compatible API."""

    def __init__(self):
        # ModelScope exposes an OpenAI-compatible endpoint; auth uses the
        # module-level SDK token.
        self.client = OpenAI(
            api_key=MODELSCOPE_SDK_TOKEN,
            base_url="https://api-inference.modelscope.cn/v1/"
        )

    def call_api(self, model_id, content):
        """Send a single-turn user message and stream the reply to stdout.

        Args:
            model_id: ModelScope model id to run.
            content: user message text.

        Returns:
            The full streamed text (thinking + answer) concatenated.
        """
        # extra_body controls the model's "thinking" phase
        extra_body = {
            # enable thinking, set to False to disable
            "enable_thinking": True,
            # use thinking_budget to control num of tokens used for thinking
            "thinking_budget": 4096
        }

        response = self.client.chat.completions.create(
            model=model_id,  # ModelScope Model-Id
            messages=[
                {
                    'role': 'user',
                    'content': content
                }
            ],
            stream=True,
            extra_body=extra_body
        )
        parts = []
        done_thinking = False
        for chunk in response:
            delta = chunk.choices[0].delta
            # reasoning_content / content may be None (or absent) on some
            # chunks — normalize to '' so comparisons and concatenation work.
            thinking_chunk = getattr(delta, 'reasoning_content', None) or ''
            answer_chunk = delta.content or ''
            if thinking_chunk != '':
                print(thinking_chunk, end='', flush=True)
            elif answer_chunk != '':
                if not done_thinking:
                    print('\n\n === Final Answer ===\n')
                    done_thinking = True
                print(answer_chunk, end='', flush=True)
            # BUG FIX: original assigned (not accumulated) here, so only the
            # last chunk was returned. Accumulate every chunk instead.
            parts.append(thinking_chunk + answer_chunk)
        return ''.join(parts)


class QwenChatbot:
    """Multi-turn chat wrapper around a locally loaded causal language model."""

    def __init__(self, model_name):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.history = []

    def generate_response(self, user_input):
        """Generate the assistant reply for *user_input*, keeping chat history.

        Args:
            user_input: the new user message.

        Returns:
            The decoded model reply (special tokens stripped).
        """
        conversation = [*self.history, {"role": "user", "content": user_input}]

        # Render the conversation with the model's chat template, ending with
        # the generation prompt so the model continues as the assistant.
        prompt = self.tokenizer.apply_chat_template(
            conversation,
            tokenize=False,
            add_generation_prompt=True
        )

        encoded = self.tokenizer(prompt, return_tensors="pt")
        prompt_len = len(encoded.input_ids[0])
        generated = self.model.generate(**encoded, max_new_tokens=32768)
        # Drop the echoed prompt tokens; keep only the newly generated tail.
        reply_ids = generated[0][prompt_len:].tolist()
        reply = self.tokenizer.decode(reply_ids, skip_special_tokens=True)

        # Record the exchange so later turns see the full context.
        self.history.append({"role": "user", "content": user_input})
        self.history.append({"role": "assistant", "content": reply})

        return reply


class MockLLMService:
    """Answers questions from a local SQLite-backed QA table instead of an LLM."""

    @staticmethod
    def get_answer(question):
        """Look up stored answers whose question column contains *question*.

        Args:
            question: substring matched against the stored questions (LIKE).

        Returns:
            All matching answers concatenated, or a Chinese "not found" message.
        """
        db = LocalDBManager('data/qa.db')
        table_name = 'questions_answers'
        columns = ['answer']
        # NOTE(review): the user-supplied text ends up in a LIKE pattern —
        # confirm LocalDBManager.query binds this value as a parameter and
        # does not interpolate it into raw SQL.
        where = {"question LIKE ": f"%{question}%"}
        rows = db.query(table_name, columns=columns, where=where)
        if not rows:
            return "对不起，没有找到答案。"
        # str.join avoids the quadratic += string-building of the original.
        answer = "".join(row['answer'] for row in rows)
        print('get_answer :: ', answer)
        return answer
