import os
import json
from openai import OpenAI
from datetime import datetime
import glob
from dotenv import load_dotenv



# Load API credentials from the "ak.envs" dotenv file located two directories
# above this script; load_dotenv() makes its entries readable via os.getenv
# (the DASHSCOPE_API_KEY used below is expected to come from here).
env_path = os.path.join(os.path.dirname(__file__), '..', '..', 'ak.envs')
load_dotenv(dotenv_path=env_path)


# Hard cap on cumulative tokens across ALL saved conversations (cost control);
# enforced by summing the saved history files before each new request.
TOKEN_LIMIT = 5000


def get_total_tokens_used():
    """Sum the "总计" (grand-total) token counts from all saved chat histories.

    Scans the "使用历史记录" directory that sits next to this script for files
    matching ``chat_*.json`` (the files written after each conversation) and
    accumulates each file's ``Token使用情况`` -> ``总计`` value.

    Returns:
        int: cumulative token total; 0 if the directory does not exist or no
        readable history file contributes a count.
    """
    history_dir = os.path.join(os.path.dirname(__file__), "使用历史记录")
    if not os.path.exists(history_dir):
        return 0

    total_tokens = 0
    json_files = glob.glob(os.path.join(history_dir, "chat_*.json"))

    for json_file in json_files:
        try:
            with open(json_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
            total_tokens += data.get("Token使用情况", {}).get("总计", 0)
        except (OSError, ValueError, AttributeError, TypeError):
            # Best-effort scan: skip unreadable files (OSError), malformed
            # JSON (ValueError covers json.JSONDecodeError), non-dict payloads
            # (AttributeError on .get), or non-numeric counts (TypeError on +=).
            # The original bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, which hid real problems.
            continue

    return total_tokens

# Choose a time-of-day label and a playful greeting, then prompt the user.
current_time = datetime.now()
current_hour = int(current_time.hour)

# (start_hour_inclusive, end_hour_exclusive, period label, greeting text)
_GREETINGS = (
    (5, 12, "早上", "嗷呜（打哈欠），准备好开始一段新的冒险了吗，骚年(ʃƪಡ ꇴ ಡ)"),
    (12, 14, "中午", "今儿有点热啊，不过天气倒是不错哈，要不来瓶冰阔落ᕦ(΄◞ิ ꇴ ◟ิ‵)"),
    (14, 18, "下午", "Bonjour，（呦)，一眨眼竟然到下午了，要开始卷喽(ㅍ△ㅍ๑)"),
    (18, 22, "晚上", "月明星稀，乌鹊南飞，精彩的一天就要落幕喽，让我们一起回忆下精彩的一天吧( ͡° ͜ʖ ͡°)✧"),
)

for _start, _end, period, text in _GREETINGS:
    if _start <= current_hour < _end:
        break
else:
    # No slot matched: 22:00 through 04:59 gets the late-night greeting.
    period = "凌晨"
    text = "还没睡还在卷呐，还是因为哪个小女生/小帅哥搁着网抑云呐，哎呀，不哭不哭，跟我聊聊呗，我的嘴就是肉包子打狗，有去无回٩(๛ ˘ ³˘)۶"

question = input(f"{period}好，{text},请输入您的问题：")


# Gate the request on the historical token budget before spending any money.
tokens_used = get_total_tokens_used()
print(f"\n📊 当前已使用 Token: {tokens_used}/{TOKEN_LIMIT}")

# Hard stop: the cumulative budget is exhausted.
if tokens_used >= TOKEN_LIMIT:
    for _line in (
        f"\n❌ Token 使用量已达到限制 ({TOKEN_LIMIT})！",
        "为了控制成本，服务已暂停。",
        f"累计使用: {tokens_used} tokens",
        "\n如需继续使用，请联系管理员调整限制或清理历史记录。",
    ):
        print(_line)
    exit(0)

# Soft stop: very little budget left — let the user decide whether to proceed.
remaining_tokens = TOKEN_LIMIT - tokens_used
if remaining_tokens < 200:
    print(f"⚠️  警告：剩余 Token 不足 ({remaining_tokens})，可能无法完成本次对话！")
    confirm = input("是否继续？(y/n): ")
    if confirm.lower() != "y":
        print("已取消操作。")
        exit(0)

# OpenAI-compatible client pointed at Alibaba Cloud DashScope.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Single-turn completion: fixed French-teacher persona plus the user's question.
completion = client.chat.completions.create(
    model="qwen-turbo",
    messages=[
        {"role": "system", "content": "You are a French teacher. Help students learn French."},
        {"role": "user", "content": question},
    ],
)

ai_response = completion.choices[0].message.content

separator = "=" * 50
print("\n" + separator)
print("AI 回复：")
print(separator)
print(ai_response)
print(separator)

usage = completion.usage
print(f"\n使用的 tokens: {usage.total_tokens} (提问: {usage.prompt_tokens}, 回复: {usage.completion_tokens})")


# Persist this exchange so later runs can enforce the cumulative token budget
# (these files are what the token-counting scan reads back).
history_dir = os.path.join(os.path.dirname(__file__), "使用历史记录")
# exist_ok=True replaces the exists()/makedirs() pair: no check-then-act race,
# and no crash if the directory appears between the check and the creation.
os.makedirs(history_dir, exist_ok=True)


# Everything needed to audit the conversation and its token cost.
history_data = {
    "时间戳": current_time.strftime("%Y-%m-%d %H:%M:%S"),
    "时段": period,
    "会话ID": completion.id,
    "模型": completion.model,
    "对话内容": {
        "系统提示": "You are a French teacher. Help students learn French.",
        "用户问题": question,
        "AI回答": ai_response
    },
    "Token使用情况": {
        "总计": completion.usage.total_tokens,
        "提问消耗": completion.usage.prompt_tokens,
        "回复消耗": completion.usage.completion_tokens
    }
}


# One file per conversation, timestamp-named so it matches the chat_*.json glob.
filename = f"chat_{current_time.strftime('%Y%m%d_%H%M%S')}.json"
filepath = os.path.join(history_dir, filename)


# ensure_ascii=False keeps the Chinese keys/values human-readable on disk.
with open(filepath, 'w', encoding='utf-8') as f:
    json.dump(history_data, f, ensure_ascii=False, indent=2)

print(f"\n✅ 对话已保存至: {filepath}")


# Re-scan the history (now including this conversation) and report the budget.
new_total_tokens = get_total_tokens_used()
remaining = TOKEN_LIMIT - new_total_tokens
percentage = new_total_tokens / TOKEN_LIMIT * 100

print("\n📊 Token 使用统计:")
print(f"   累计使用: {new_total_tokens}/{TOKEN_LIMIT} ({percentage:.1f}%)")
print(f"   剩余可用: {remaining} tokens")

# Tiered warnings: the < 500 branch wins when both thresholds are crossed.
if remaining < 500:
    print("   ⚠️  警告：剩余 Token 不足，请注意！")
elif remaining < 1000:
    print("   ⚡ 提示：Token 即将用完")
