import os
import time
import sys
import json
import asyncio
import aiohttp
from ebooklib import epub
from bs4 import BeautifulSoup
import re
import importlib.util
import random
import copy
from typing import Dict

# Global configuration loaded from ./config.json by init(); keys: "files",
# "prompt", "api_key".
CONFIG_DATA={}
# In-memory mirror of ./cache/translate_cache.json: {book: {chapter: text}}.
translate_cache={}

def init():
    """Validate (and repair) the working layout, then load config and caches.

    Creates ./cache, ./scripts and ./output when missing, seeds the two
    cache JSON files with empty objects, creates a default ./config.json
    when absent or empty, and cross-checks the parser scripts referenced
    by the config against ./scripts. Exits the process (after a short
    pause) when anything had to be repaired or is invalid, so the user
    can fix the generated files before the next run.
    """
    global CONFIG_DATA
    global translate_cache
    flag=False

    # Repair missing working directories first — everything below (including
    # loading the translate cache) depends on them. The original version
    # loaded the cache before ./cache was guaranteed to exist and only ran
    # the script/config checks when ./output already existed.
    for folder in ("cache","scripts","output"):
        if not os.path.isdir("./"+folder):
            os.mkdir(folder)
            print(f"文件夹\"{folder}\"缺失，已完成修复")
            flag=True
    print("文件夹检查完成")

    # Seed both cache files with an empty JSON object when missing/empty.
    for cache_path in ("./cache/translate_cache.json","./cache/word_mapping.json"):
        if not os.path.exists(cache_path) or os.path.getsize(cache_path)==0:
            with open(cache_path,"w",encoding="utf-8") as f:
                f.write("{}")

    # Safe to load now that the cache file is guaranteed to exist.
    translate_cache=load_translate_cache()

    # Create a default config when missing/empty, otherwise load and check it.
    if not os.path.exists("./config.json") or os.path.getsize("./config.json")==0:
        with open("./config.json","w",encoding="utf-8") as f:
            json.dump({"files":{},"prompt":"","api_key":""},f,indent=4,ensure_ascii=False)
        print("config不存在或为空，已完成修复，程序退出")
        flag=True
    else:
        with open("./config.json","r",encoding="utf-8") as f:
            CONFIG_DATA=json.load(f)
        print(f"成功读取配置文件:{CONFIG_DATA}")  # typo fix: message previously duplicated "读取"

        # At least one parser script must be available.
        if len(os.listdir("scripts"))==0:
            print("无可用epub解析脚本，请添加")
            flag=True

        # Every script referenced by the config must exist in ./scripts
        # (compared without the file extension).
        script_list=[os.path.splitext(name)[0] for name in os.listdir("scripts")]
        print(f"现有脚本:{script_list}")
        for key in CONFIG_DATA["files"]:
            if key not in script_list:
                print("config错误：不存在的解析脚本")
                flag=True

        print("初始化完成")

    if flag:
        time.sleep(1)
        sys.exit(0)


def import_module_from_file(file_path, module_name=None):
    """Dynamically load a Python module from an arbitrary file path.

    Args:
        file_path: path to the .py file to load.
        module_name: optional name to register the module under in
            sys.modules; defaults to the file's basename without extension.

    Returns:
        The executed module object, or None when loading failed (an error
        message is printed instead of raising).
    """
    if module_name is None:
        module_name = os.path.splitext(os.path.basename(file_path))[0]

    try:
        # Build an import spec for the file, materialize a module object
        # from it, register it in sys.modules, then execute the file's code.
        spec = importlib.util.spec_from_file_location(module_name, file_path)
        if spec is None:
            raise FileNotFoundError(f"无法为文件 {file_path} 创建模块规格")
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        spec.loader.exec_module(module)
    except Exception as e:
        # Loading is best-effort by design: report and signal failure via None.
        print(f"导入模块 {file_path} 时出错: {e}")
        return None
    return module

# Cache-file helpers (translate cache and proper-noun word mapping)
def load_translate_cache():
    """Read and return the translation cache from ./cache/translate_cache.json."""
    with open("./cache/translate_cache.json", encoding="utf-8") as cache_file:
        return json.load(cache_file)
    
def save_translate_cache(new_data):
    """Overwrite ./cache/translate_cache.json with *new_data* as pretty JSON."""
    serialized = json.dumps(new_data, indent=4, ensure_ascii=False)
    with open("./cache/translate_cache.json", "w", encoding="utf-8") as cache_file:
        cache_file.write(serialized)

def load_mapping():
    """Read and return the proper-noun mapping from ./cache/word_mapping.json."""
    with open("./cache/word_mapping.json", encoding="utf-8") as mapping_file:
        return json.load(mapping_file)

def save_mapping(cache_dict):
    """Persist the proper-noun mapping to ./cache/word_mapping.json as pretty JSON."""
    serialized = json.dumps(cache_dict, ensure_ascii=False, indent=4)
    with open("./cache/word_mapping.json", "w", encoding="utf-8") as mapping_file:
        mapping_file.write(serialized)

def clear_translate_cache():
    """Reset ./cache/translate_cache.json to an empty JSON object."""
    # json.dump({}, ..., indent=4) emits exactly "{}", so write it directly.
    with open("./cache/translate_cache.json", "w", encoding="utf-8") as cache_file:
        cache_file.write("{}")

# Asynchronous translation
async def fetch(session,data):
    """POST *data* as a single user message to the chat-completions endpoint.

    Uses the API key from the module-level CONFIG_DATA. Returns the raw
    response body as a JSON *string* (not a parsed dict).
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    request_body = {
        "model": "Pro/deepseek-ai/DeepSeek-R1",
        "messages": [{"role": "user", "content": f"{data}"}],
    }
    auth_headers = {
        "Authorization": f"Bearer {CONFIG_DATA['api_key']}",
        "Content-Type": "application/json",
    }

    async with session.post(url, headers=auth_headers, json=request_body) as response:
        print(response.status)  # log the HTTP status code
        return await response.text()

async def translate_main(books_data):
    """Translate every pending chapter concurrently over one HTTP session.

    Args:
        books_data: {filename: {chapter_title: chapter_text}} of chapters
            that still need translation.

    Side effects:
        Each finished translation is stored in the module-level
        translate_cache and persisted via save_translate_cache() as it
        arrives, so partial progress survives a crash.

    Returns:
        List of (book, title, translated_text) tuples.
    """
    async with aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(limit_per_host=20),  # cap per-host concurrency
        timeout=aiohttp.ClientTimeout(total=1800)
    ) as session:
        async def fetch_with_limit(book,title,content):
            print(f"电子书\"{book}\"-章节\"{title}\"加入翻译队列")
            # Small random delay to stagger the request burst.
            await asyncio.sleep(random.uniform(0.1, 0.5))
            resp = await fetch(session,CONFIG_DATA["prompt"]+content)
            print(f"电子书\"{book}\"-章节\"{title}\"翻译完成")

            # Parse the response once (the original called json.loads twice).
            translated = json.loads(resp)["choices"][0]["message"]["content"]
            translate_cache.setdefault(book, {})[title] = translated
            save_translate_cache(translate_cache)

            return book, title, translated

        tasks = []
        for bookname,bookdata in books_data.items():
            print(f"正在翻译{bookname}")
            for title,content in bookdata.items():
                # Cache keys drop the file extension from the book name.
                tasks.append(asyncio.create_task(
                    fetch_with_limit(os.path.splitext(bookname)[0],title,content)))

        results = await asyncio.gather(*tasks)
        print("全部翻译完成")
        return results
    
def sort_chapter_dict(chapter_dict):
    """Return a new dict with chapters ordered: Prologue, numbered chapters, rest.

    Keys named "Chapter <n>" sort by their number; anything unparseable or
    unrecognized keeps its relative order at the end (sorted() is stable).
    """
    def rank(title):
        # (bucket, number): bucket 0 = Prologue, 1 = numbered chapter, 2 = other.
        if title == "Prologue":
            return (0, 0)
        if title.startswith("Chapter "):
            try:
                return (1, int(title.split(" ", 1)[1]))
            except (IndexError, ValueError):
                pass
        return (2, 0)

    return {title: chapter_dict[title] for title in sorted(chapter_dict, key=rank)}
    
def replace_bracketed_words(text, cache_dict):
    """Replace every {word} placeholder in *text* using *cache_dict*.

    Words not present in the cache are resolved interactively via input()
    (empty answer keeps the original word) and remembered in cache_dict.
    The mapping file is persisted via save_mapping() only when the cache
    actually gains a new entry — the original rewrote the file once per
    key even on pure cache hits.

    Returns the text with all placeholders substituted (braces removed).
    """
    pattern = r'\{([^{}]+)\}'

    # Find all bracketed words; de-duplicate so each word is handled once.
    matches = re.findall(pattern, text)
    for key in set(matches):
        if key in cache_dict:
            replacement = cache_dict[key]
        else:
            user_input = input(f"请输入 '{key}' 的替换内容（直接回车保留原词）: ").strip()
            replacement = user_input if user_input else key
            cache_dict[key] = replacement
            # Persist immediately so an interrupted session keeps its answers.
            save_mapping(cache_dict)
        # Replace all occurrences, including the surrounding braces
        # (tolerating stray whitespace just inside them).
        text = re.sub(r'\{\s*' + re.escape(key) + r'\s*\}', replacement, text)

    return text

def dict_to_epub(title: str, author: str, chapters: Dict[str, str], output_path: str):
    """Build an EPUB book from a chapter dict and write it to disk.

    Args:
        title: book title (also used to derive the book identifier).
        author: author name.
        chapters: mapping of chapter title -> chapter body (inserted as HTML).
        output_path: destination path of the generated .epub file.
    """
    book = epub.EpubBook()

    # Book metadata; identifier is derived from the title.
    book.set_identifier('id_' + title.replace(' ', '_'))
    book.set_title(title)
    book.set_language('zh')
    book.add_author(author)

    # One EpubHtml item per chapter, file names numbered from 1.
    epub_chapters = []
    for index, (chapter_title, chapter_content) in enumerate(chapters.items(), 1):
        chapter = epub.EpubHtml(
            title=chapter_title,
            file_name=f'chapter_{index}.xhtml',
            lang='zh'
        )
        chapter.content = f"""
        <html>
            <head>
                <title>{chapter_title}</title>
            </head>
            <body>
                <h1>{chapter_title}</h1>
                <div>{chapter_content}</div>
            </body>
        </html>
        """
        book.add_item(chapter)
        epub_chapters.append(chapter)

    # Table of contents plus the standard navigation documents.
    book.toc = tuple(epub_chapters)
    book.add_item(epub.EpubNcx())
    book.add_item(epub.EpubNav())

    # Minimal default stylesheet.
    style = '''
    @namespace epub "http://www.idpf.org/2007/ops";
    body {
        font-family: Cambria, Liberation Serif, serif;
    }
    h1 {
        text-align: center;
        text-transform: uppercase;
        font-weight: 200;
    }
    '''
    book.add_item(epub.EpubItem(
        uid="style_nav",
        file_name="style/nav.css",
        media_type="text/css",
        content=style
    ))

    # Reading order: navigation page first, then the chapters.
    book.spine = ['nav'] + epub_chapters

    epub.write_epub(output_path, book, {})
    print(f"EPUB文件已成功生成: {output_path}")

if __name__=="__main__":
    init()#initialize directories, config file and caches (exits on repair)

    # Translate loop: repeats until the user approves translate_cache.json.
    while True:
        translate_cache=load_translate_cache()
        books_data={}
        # Read the e-books via the per-format parser scripts listed in
        # config.json ("files" maps script name -> list of book paths).
        for key,value in CONFIG_DATA["files"].items():
            for file in value:
                books_data[os.path.basename(file)]={}
                print(file+":")
                module=import_module_from_file("./scripts/"+key+".py")
                # Queue only chapters missing from the cache; cache keys use
                # the filename without its extension.
                for title,content in module.extract_chapters(file).items():
                    if title not in translate_cache.get(os.path.splitext(os.path.basename(file))[0], {}):
                        books_data[os.path.basename(file)][title]=content
                print(f"已读取电子书\"{file}\"的分章节内容")

        # Fire all translation requests concurrently.
        asyncio.run(translate_main(books_data))

        # Re-order cached chapters (Prologue first, then by chapter number).
        buf=load_translate_cache()
        for bookname,bookdata in buf.items():
            buf[bookname]=sort_chapter_dict(bookdata)
        save_translate_cache(buf)

        # Optional re-translation: drop selected chapters (or whole books)
        # from the cache so the next loop iteration translates them again.
        if input("请检查translate_cache.json，是否需要重新翻译(y/n)：")=="y":
            buf=load_translate_cache()
            new_buf=copy.deepcopy(buf)
            print("重翻书入开始，遵循规则：“留空为无，如果多个章节用\"英文逗号\"隔开，输入\"full\"为重翻译整本书”")
            for bookname,bookdata in buf.items():
                user_input=input(f"请输入{bookname}需要重新翻译的章节：")
                if user_input=="full":
                    del new_buf[bookname]
                elif user_input!="":
                    for chapter in user_input.split(','):
                        del new_buf[bookname][chapter.strip()]
            save_translate_cache(new_buf)
        else:
            break

    # Interactively resolve {proper-noun} placeholders left by the model.
    final_data={}
    translated_dict=load_translate_cache()
    for bookname,bookdata in translated_dict.items():
        print(f"手动替换书{bookname}中的专有名词：")
        final_data[bookname]={}
        for title,content in bookdata.items():
            cache_dict=load_mapping()
            final_data[bookname][title]=replace_bracketed_words(content, cache_dict)# NOTE(review): mapping reloaded per chapter; original comment here was unintelligible
            print(f'第{title}章翻译完成')
    

    print(final_data)
    # Emit one EPUB per book into ./output.
    for bookname,bookdata in final_data.items():
        dict_to_epub(bookname,"Erin Hunter",bookdata,f"./output/{bookname}.epub")
        print(f"输出{bookname}中文版的epub文件")# original note said "temporarily use md instead" — appears stale

    clear_translate_cache()