# from transformers import AutoModelForCausalLM, AutoTokenizer

# # 设置你想要保存模型的本地路径
# model_save_path = "D:/ai-daily/backend/models/qwen3-7b"

# # 从 Hugging Face 下载模型和 tokenizer，并加载进内存
# print("正在从 Hugging Face 下载模型...")
# tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B")
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-1.7B")

# # 将模型保存到你指定的本地路径
# print(f"正在将模型保存到：{model_save_path}")
# tokenizer.save_pretrained(model_save_path)
# model.save_pretrained(model_save_path)

# print("✅ 模型下载并保存完成！")

# from models.loader import LocalModel

# llm = LocalModel()
# response = llm.generate("请描述一下你对下雨天的感受")
# print(response)

import requests

import os

# SECURITY NOTE(review): an API key was hard-coded here and has been committed
# to source control — it must be treated as compromised and rotated. Prefer
# setting DASHSCOPE_API_KEY in the environment; the literal is kept only as a
# fallback so existing runs keep working until the key is rotated.
api_key = os.getenv('DASHSCOPE_API_KEY', 'sk-9542adb9e87044128a805f7ce5441db4')
url = 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation'

# DashScope's HTTP API authenticates with a Bearer token and takes a JSON body.
headers = {
    'Authorization': f'Bearer {api_key}',
    'Content-Type': 'application/json',
}

# Request payload: model name, the user prompt, and generation parameters.
data = {
    "model": "qwen-turbo",
    "input": {
        "prompt": "你好，介绍一下你自己。"
    },
    "parameters": {
        "max_tokens": 128,
        "temperature": 0.7
    }
}

# timeout=30 prevents the script from blocking forever on a network stall;
# RequestException covers DNS/connection/timeout errors so a transient network
# problem prints a readable message instead of a raw traceback.
try:
    response = requests.post(url, headers=headers, json=data, timeout=30)
    print("状态码:", response.status_code)
    print("响应内容:", response.text)
except requests.RequestException as exc:
    print("请求失败:", exc)