# @FileName  : main.py
# @Time      : 2025/2/6 7:18
# @Author    : LuZhaoHui
# @Software  : PyCharm


# Please install OpenAI SDK first: `pip3 install openai`

import base64
import json
import os

import requests

from tool import *
# from openai import OpenAI

# API endpoint and credentials for SiliconFlow's OpenAI-compatible chat API.
apiUrl = "https://api.siliconflow.cn/v1/chat/completions"
# SECURITY: an API key was previously hardcoded here and has been exposed in
# version control — it should be rotated. Prefer the SILICONFLOW_API_KEY
# environment variable; the old literal is kept only as a backward-compatible
# fallback so existing setups keep working.
apiKey = os.environ.get(
    "SILICONFLOW_API_KEY",
    "sk-kitgmqyorzmxyipdujbjzwxorwzlfkuuxzsgbkgmaftpudiw",
)
# Default chat model served by the endpoint.
apiModel = "deepseek-ai/DeepSeek-V3"
# Local directory holding downloaded model weights.
modelPath = "/home/luzhaohui/models"


def getHead():
    """Build the HTTP headers for chat-completion API requests.

    Returns:
        dict: bearer-token authorization plus the JSON content type
        expected by the endpoint.
    """
    return {
        "Authorization": f"Bearer {apiKey}",
        "Content-Type": "application/json",
    }


def getTextDeepV3(userText, *, model="deepseek-ai/DeepSeek-V3",
                  max_tokens=512, temperature=0.7):
    """Build the JSON request body for a single-turn chat completion.

    Args:
        userText: the user's prompt, sent as one ``user`` message.
        model: model identifier to request (defaults to DeepSeek-V3,
            matching the previous hard-coded value).
        max_tokens: completion length cap (previous hard-coded 512).
        temperature: sampling temperature (previous hard-coded 0.7).

    Returns:
        dict: payload shaped per the API's chat-completion schema;
        adjust fields against the provider's documentation as needed.
    """
    return {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": userText
            }
        ],
        "stream": False,
        "max_tokens": max_tokens,
        # NOTE(review): the string "null" looks like it may have been meant
        # as JSON null (i.e. no stop sequence) — kept as-is; confirm against
        # the API docs before changing.
        "stop": ["null"],
        "temperature": temperature,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
        # "tools": [
        #     {
        #         "type": "function",
        #         "function": {
        #             "description": "<string>",
        #             "name": "<string>",
        #             "parameters": {},
        #             "strict": False
        #         }
        #     }
        # ]
    }


def test1(reqText):
    """Send one chat-completion request and return the parsed JSON response.

    Args:
        reqText: user prompt forwarded to :func:`getTextDeepV3`.

    Returns:
        dict | None: decoded JSON body on success, ``None`` on any
        request/HTTP failure (the error is printed, not raised).
    """
    print(reqText)
    try:
        # FIX: the original call had no timeout, so a stalled server could
        # hang the process forever. (connect, read) timeout in seconds.
        response = requests.post(apiUrl, json=getTextDeepV3(reqText),
                                 headers=getHead(), timeout=(10, 120))
        # Raise for 4xx/5xx so they are reported via the except branch.
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        return None


def downLoadModel(model):
    """Download a ModelScope model snapshot into the shared cache dir.

    Args:
        model: ModelScope model ID (e.g. ``'Qwen/Qwen2.5-3B-Instruct'``).
    """
    # Imported lazily so the heavy modelscope dependency is only needed
    # when a download is actually requested.
    from modelscope.hub.snapshot_download import snapshot_download

    # AIMODELDIR comes from `tool` (star import) and is the cache root.
    download_path = snapshot_download(model, cache_dir=AIMODELDIR)
    print(f"Model downloaded to: {download_path}")


def loadUseModel(model):
    """Load a locally cached model and run one text-generation inference.

    Args:
        model: model subdirectory name under ``AIMODELDIR``.
    """
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks

    # Path of the previously downloaded model inside the shared cache.
    local_model = f"{AIMODELDIR}/{model}"
    generator = pipeline(task=Tasks.text_generation, model=local_model)
    # Smoke-test the pipeline with a fixed prompt and print the result.
    print(generator('输入文本'))


def modelscopeConvertOllama(model_id, output_dir='D:/Ollama/models'):
    """Re-save a model in Hugging Face Transformers format for later reuse.

    Args:
        model_id: model path or hub ID loadable by ``from_pretrained``.
        output_dir: destination directory; defaults to the previously
            hard-coded ``'D:/Ollama/models'`` for backward compatibility.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Load the model weights and its tokenizer.
    model = AutoModelForCausalLM.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Persist both in HF-compatible layout so other tooling can consume them.
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
    print(f"模型和分词器已保存到 {output_dir}")


def modelTask(model_id):
    """Run one text-classification inference with a ModelScope model.

    Args:
        model_id: ModelScope model ID or local path.
    """
    from modelscope.models import Model
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks

    # Text classification is used here as the example task; swap the
    # Tasks value if the target model serves a different task.
    classifier = pipeline(
        task=Tasks.text_classification,
        model=Model.from_pretrained(model_id),
    )

    # Predict on a fixed test sentence and show the raw result.
    print(classifier("这是一个测试句子。"))


def modelscopeCmpLlama(mode1, mode2):
    """Compare the hidden-state outputs of two model checkpoints.

    Runs the same test sentence through both checkpoints and reports
    whether their final hidden states match within tolerance.

    Args:
        mode1: path/ID of the original checkpoint.
        mode2: path/ID of the converted checkpoint to compare against.
    """
    from transformers import AutoModel, AutoTokenizer
    import torch

    # BUG FIX: the original code loaded both models from `mode1` and both
    # tokenizers from `mode2`, so it always compared a checkpoint against
    # itself. Each model/tokenizer pair now comes from its own path.
    original_model = AutoModel.from_pretrained(mode1)
    original_tokenizer = AutoTokenizer.from_pretrained(mode1)

    converted_model = AutoModel.from_pretrained(mode2)
    converted_tokenizer = AutoTokenizer.from_pretrained(mode2)

    # Fixed test sentence used for both runs.
    test_text = "这是一个测试句子。"

    # Tokenize with each checkpoint's own tokenizer.
    original_inputs = original_tokenizer(test_text, return_tensors='pt')
    converted_inputs = converted_tokenizer(test_text, return_tensors='pt')

    # Forward passes.
    original_outputs = original_model(**original_inputs)
    converted_outputs = converted_model(**converted_inputs)

    # More detailed comparison logic can be added here.
    if torch.allclose(original_outputs.last_hidden_state, converted_outputs.last_hidden_state, atol=1e-5):
        print("Outputs match!")
    else:
        print("Outputs do not match.")


if __name__ == '__main__':
    # AIMODELDIR is provided by `tool` (star import); without it there is
    # nowhere to cache downloaded models.
    # FIX: use identity comparison with None instead of `== None`.
    if AIMODELDIR is None:
        print('system error')
        exit(-1)
    print('tongyiQwen test')
    # Candidate model IDs kept for quick switching between experiments.
    # model = 'MiniMaxAI/MiniMax-VL-01'
    # model = 'MiniMaxAI/MiniMax-Text-01'
    # model = 'AI-ModelScope/MiniMax-Text-01'
    # model = 'AI-ModelScope/MiniMax-VL-01'

    # deepseek-vl2
    # model = "deepseek-ai/deepseek-vl2"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-8bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-6bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-4bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-3bit"
    # downLoadModel(model)
    # model = "deepseek-ai/deepseek-vl2-tiny"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-tiny-8bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-tiny-6bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-tiny-4bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-tiny-3bit"
    # downLoadModel(model)
    # model = "deepseek-ai/deepseek-vl2-small"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-small-8bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-small-6bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-small-4bit"
    # downLoadModel(model)
    # model = "mlx-community/deepseek-vl2-small-3bit"
    # downLoadModel(model)

    # Qwen2.5
    # model = 'Qwen/Qwen2.5-7B-Instruct'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-7B-Instruct-8bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-7B-Instruct-4bit'
    # downLoadModel(model)

    # model = 'mlx-community/Qwen2.5-7B-Instruct-3bit'
    # downLoadModel(model)
    # Currently active experiment: download Qwen2.5-3B-Instruct.
    model = 'Qwen/Qwen2.5-3B-Instruct'
    downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-3B-Instruct-8bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-3B-Instruct-4bit'
    # downLoadModel(model)

    # Qwen2.5-VL
    # model = 'Qwen/Qwen2.5-VL-72B-Instruct'
    # model = 'mlx-community/Qwen2.5-VL-72B-Instruct-8bit'
    # model = 'mlx-community/Qwen2.5-VL-72B-Instruct-6bit'
    # model = 'mlx-community/Qwen2.5-VL-72B-Instruct-4bit'
    # model = 'mlx-community/Qwen2.5-VL-72B-Instruct-3bit'
    # downLoadModel(model)

    # model = 'Qwen/Qwen2.5-VL-7B-Instruct'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-VL-7B-Instruct-8bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-VL-7B-Instruct-6bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-VL-7B-Instruct-3bit'
    # downLoadModel(model)
    # model = 'Qwen/Qwen2.5-VL-3B-Instruct'
    # downLoadModel(model)

    # model = 'mlx-community/Qwen2.5-VL-3B-Instruct-8bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-VL-3B-Instruct-6bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-VL-3B-Instruct-4bit'
    # downLoadModel(model)
    # model = 'mlx-community/Qwen2.5-VL-3B-Instruct-3bit'
    # downLoadModel(model)

    # loadUseModel(model)
    # modelscopeConvertOllama(model)
    # modelTask(model)
    # model = 'bartowski/Qwen2-VL-7B-Instruct-GGUF'
    # downLoadModel(model)
    # model = 'lmstudio-community/Qwen2-VL-2B-Instruct-GGUF'

    # downLoadModel(model)
    # modelscopeConvertOllama(model)
    # userPrompt = "你好,我是小白"
    # userPrompt = "你好,我是小白,想知道如何学习siliconFlow的API调用"
    # result = test1(userPrompt)
    # Print the API result, assuming the response shape
    # {"choices": [{"message": {"content": "..."}}]}.
    # if result:
    # generated_text = result["choices"][0]["message"]["content"]
    # print("生成的回复：\n", generated_text)
    # saveLog(result, log='api.txt')
    # else:
    #     print("请求未成功。")
