#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
from http import HTTPStatus

import dashscope
from dashscope import TextEmbedding
from dashvector import Client, Doc

# Created by hadoop on 17-8-9.
# Notebook demo
# https://github.com/modelscope/modelscope/blob/release/1.13/examples/pytorch/application/qwen_doc_search_QA_based_on_dashscope.ipynb
# 【增强大模型问答能力！通义千问7B基于本地知识库问答操作指南】https://www.bilibili.com/video/BV1Xu4y1B7eg?vd_source=e55a5acfa9ac8b98a0c54d7dea66e9a1

# modelScope安装比较复杂
# https://www.modelscope.cn/docs/%E7%8E%AF%E5%A2%83%E5%AE%89%E8%A3%85
# initialize qwen 7B model
# from modelscope import AutoModelForCausalLM, AutoTokenizer
# from modelscope import GenerationConfig

# get env variable from .env
# please make sure DASHSCOPE_KEY is defined in .env
# load_dotenv()
# NOTE(security): real API keys were committed here. Revoke them and supply
# DASHSCOPE_API_KEY / DASHVECTOR_API_KEY via environment variables (.env);
# the literals below remain only as backward-compatible fallbacks.
dashscope.api_key = os.getenv('DASHSCOPE_API_KEY',
                              'sk-73d63b18321142d4a54de172866d2573')

# initialize DashVector client for embedding indexing and searching
dashvector_client = Client(
    api_key=os.getenv(
        'DASHVECTOR_API_KEY',
        'sk-9k1sIQJvHsfOV4BmcLDFKeYEcGQ3H97172D44E36311EEBB3F02F685E693CB'),
    endpoint=os.getenv(
        'DASHVECTOR_ENDPOINT',
        'vrs-cn-g4t3nm78h0006h.dashvector.cn-hangzhou.aliyuncs.com'))

# collection holding the pre-embedded knowledge-base documents (《天龙八部》)
collection_name = 'tianlongbabu'

# To (re)build the collection from scratch — dimension 1536 matches the
# text_embedding_v1 model used in generate_embeddings():
#   dashvector_client.delete(collection_name)
#   rsp = dashvector_client.create(collection_name, 1536)
collection = dashvector_client.get(collection_name)


def generate_embeddings(docs):
    """Embed text with DashScope's text_embedding_v1 model.

    Accepts either a single string or a list of strings and mirrors the
    input shape: one embedding vector, or a list of vectors.
    """
    rsp = TextEmbedding.call(model=TextEmbedding.Models.text_embedding_v1,
                             input=docs)
    vectors = [item['embedding'] for item in rsp.output['embeddings']]
    if isinstance(docs, list):
        return vectors
    return vectors[0]


def search_relevant_context(question_param, top_k=1, client=dashvector_client):
    """Recall the top-k most similar passages from DashVector.

    Args:
        question_param: question text; embedded via generate_embeddings().
        top_k: how many nearest documents to recall.
        client: unused; kept only for backward compatibility — the query
            goes through the module-level ``collection`` handle.

    Returns:
        The recalled documents' ``raw`` fields concatenated into one string.
    """
    # `collection` is only read here, so no `global` declaration is needed.
    rsp = collection.query(generate_embeddings(question_param),
                           output_fields=['raw'],
                           topk=top_k)
    return "".join(item.fields['raw'] for item in rsp.output)


# 从环境上下载模型1.83G 在本地使用pytorch进行推理  todo 本地根本跑不了
# tokenizer = AutoTokenizer.from_pretrained("qwen/Qwen-7B-Chat", revision='v1.0.5', trust_remote_code=True)
# model = AutoModelForCausalLM.from_pretrained("qwen/Qwen-7B-Chat", revision='v1.0.5', device_map="auto",
#                                              trust_remote_code=True, fp16=True).eval()
# model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", revision='v1.0.5',
#                                                            trust_remote_code=True)  # 可指定不同的生成长度、top_p等相关超参


# define a prompt template for the vectorDB-enhanced LLM generation
# def answer_question(question, context):
#     prompt = f'''请基于```内的内容回答问题。"
# 	```
# 	{context}
# 	```
# 	我的问题是：{question}。
#     '''
#     history = None
#     print(prompt)
#     response, history = model.chat(tokenizer, prompt, history=None)  # todo? 这里为啥要传递一个tokenizer
#     return response


def call_llm(prompt):
    """Ask qwen-turbo directly, without any retrieved context.

    Prints the model output on success, or the error code and message on
    failure. Returns the output on success and None otherwise, for
    consistency with call_with_prompt() (previously it always returned
    None implicitly, so this is backward-compatible).
    """
    response = dashscope.Generation.call(
        model=dashscope.Generation.Models.qwen_turbo,
        prompt=prompt
    )
    if response.status_code == HTTPStatus.OK:
        print(response.output)
        return response.output
    print(response.code)
    print(response.message)
    return None


def call_with_prompt(question_param, context_param):
    """RAG answer: ask qwen-turbo, grounding the answer on retrieved context.

    Args:
        question_param: the user's question.
        context_param: passages recalled from the vector store; the prompt
            instructs the model to answer based on them.

    Returns:
        The generation output on success, otherwise None (the error code
        and message are printed).
    """
    prompt = f'''请基于```内的内容回答问题。
    	```
    	    {context_param}
    	```
    	我的问题是：{question_param}。
        '''
    print(prompt)

    response = dashscope.Generation.call(
        model=dashscope.Generation.Models.qwen_turbo,
        prompt=prompt
    )
    # HTTPStatus.OK indicates success; otherwise the error code and
    # message are available on the response object.
    if response.status_code == HTTPStatus.OK:
        return response.output
    print(response.code)
    print(response.message)
    return None


if __name__ == '__main__':
    # Demo: retrieval-augmented QA against the local knowledge base.
    # Other sample questions to try:
    #   "段誉的亲生父亲是谁" / "虚竹和乔峰是什么关系" / "虚竹的母亲是谁"
    question = "段誉最喜欢的人是谁"
    # recall the 3 most relevant passages before asking the model
    context = search_relevant_context(question, top_k=3)
    answer = call_with_prompt(question, context)
    print(f'question: {question}\nanswer: {answer}')
