import xml.etree.ElementTree as ET
import json
import logging
import re
import ast
from trafilatura import fetch_url, extract
import pandas as pd
import os
import requests
from json_repair import repair_json

log = logging.getLogger(__name__)

def parse_xml(xml_string, new_lines="\n"*5):
    """Split an XML document into sections keyed by their <head> text.

    Iterates every <main> element of the document; each <head> starts a new
    section. Paragraph-like siblings are joined (head text first) with
    `new_lines`, and <table> siblings are serialized to HTML strings.

    Returns (text_sections, table_sections):
        text_sections:  {head_text: joined section text}
        table_sections: {head_text: [table_html, ...]}
    Content appearing before the first <head> of a <main> is discarded.
    """
    root = ET.fromstring(xml_string)
    text_sections = {}
    table_sections = {}

    def _flush(head, texts, tables):
        # Store what has been accumulated for the current section, if any.
        if not head:
            return
        if texts:
            text_sections[head] = new_lines.join([head] + texts).strip()
        if tables:
            table_sections[head] = tables

    for main in root.findall('main'):
        head, texts, tables = None, [], []
        for child in main:
            if child.tag == 'head':
                # A new heading closes the previous section.
                _flush(head, texts, tables)
                head = ' '.join(child.itertext()).strip()
                texts, tables = [], []
            elif child.tag == 'table':
                # Keep tables as serialized HTML for later pandas parsing.
                tables.append(ET.tostring(child, encoding='unicode', method='html'))
            else:
                # Any other element is treated as paragraph text.
                texts.append(' '.join(child.itertext()).strip())
        _flush(head, texts, tables)

    return text_sections, table_sections

def try_parse_ast_to_json(function_string: str) -> tuple[str, dict]:
    """Parse a function-call string into (debug text, keyword-args dict).

    Example:
        function_string = "tool_call(first_int={'title': 'First Int', 'type': 'integer'}, second_int={'title': 'Second Int', 'type': 'integer'})"
        -> ("Function Name: tool_call\\r\\n...", {"first_int": {...}, "second_int": {...}})

    Only keyword arguments with literal values are captured; positional
    arguments are ignored. Raises SyntaxError if the input cannot be parsed
    and ValueError if a keyword value is not a literal (ast.literal_eval).
    """
    tree = ast.parse(str(function_string).strip())
    ast_info = ""
    json_result = {}
    # Walk the tree and record every call's name and keyword arguments.
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # The original assumed a bare name (node.func.id) and crashed with
        # AttributeError on attribute calls like obj.method(...); fall back
        # to ast.unparse for any non-Name callee.
        if isinstance(node.func, ast.Name):
            function_name = node.func.id
        else:
            function_name = ast.unparse(node.func)
        ast_info += f"Function Name: {function_name}\r\n"
        for kw in node.keywords:
            ast_info += f"Argument Name: {kw.arg}\n"
            ast_info += f"Argument Value: {ast.dump(kw.value)}\n"
            json_result[kw.arg] = ast.literal_eval(kw.value)

    return ast_info, json_result

def try_parse_json_object(input: str) -> tuple[str, dict]:
    """Best-effort extraction of a JSON object from an LLM response.

    Sometimes the LLM returns a JSON string wrapped in extra prose or a
    markdown fence; this cleans it up and repairs it when needed.

    Returns (cleaned_json_string, parsed_dict); the dict is {} when every
    parsing strategy fails.
    """
    result = None
    try:
        # Fast path: the input is already valid JSON.
        result = json.loads(input)
    except json.JSONDecodeError:
        log.info("Warning: Error decoding faulty json, attempting repair")

    # `is not None` so valid-but-falsy JSON ({} or []) is returned as-is
    # instead of being pushed through the repair pipeline.
    if result is not None:
        return input, result

    # Keep only the outermost {...} span when one fits on a single line.
    _pattern = r"\{(.*)\}"
    _match = re.search(_pattern, input)
    input = "{" + _match.group(1) + "}" if _match else input

    # Clean up json string.
    # NOTE(review): "\\" is replaced before "\\n", so the "\\n" rule can
    # never match; kept as-is to preserve the existing cleaned output.
    input = (
        input.replace("{{", "{")
        .replace("}}", "}")
        .replace('"[{', "[{")
        .replace('}]"', "}]")
        .replace("\\", " ")
        .replace("\\n", " ")
        .replace("\n", " ")
        .replace("\r", "")
        .strip()
    )

    # Remove a JSON markdown fence. Check the longer "```json" prefix first:
    # the old order stripped "```" and then could no longer match "```json",
    # leaving a stray "json" prefix that broke json.loads.
    if input.startswith("```json"):
        input = input[len("```json"):]
    elif input.startswith("```"):
        input = input[len("```"):]
    if input.endswith("```"):
        input = input[: len(input) - len("```")]

    try:
        result = json.loads(input)
    except json.JSONDecodeError:
        # Fixup potentially malformed json string using json_repair.
        json_info = str(repair_json(json_str=input, return_objects=False))

        # If the repair shrank the string it likely dropped content; fall
        # back to interpreting the input as a Python function-call string.
        try:
            if len(json_info) < len(input):
                json_info, result = try_parse_ast_to_json(input)
            else:
                result = json.loads(json_info)

        except json.JSONDecodeError:
            log.exception("error loading json, json=%s", input)
            return json_info, {}
        else:
            if not isinstance(result, dict):
                log.exception("not expected dict type. type=%s:", type(result))
                return json_info, {}
            return json_info, result
    else:
        return input, result

def get_text_table(model_id="meta-llama/Llama-3.1-8B-Instruct", new_lines="="*10):
    """Load previously downloaded section text and table CSV paths.

    Reads ./data/{model_id}/text.json and lists ./data/{model_id}/table.
    Returns (sections_dict, list_of_csv_paths); the path list is None when
    the table directory is empty, and (None, None) when either the JSON
    file or the table directory is missing. `new_lines` is unused, kept
    for signature compatibility.
    """
    text_file = f"./data/{model_id}/text.json"
    table_dir = f"./data/{model_id}/table"
    if not (os.path.exists(text_file) and os.path.exists(table_dir)):
        print(f"./data/{model_id}/text.json或./data/{model_id}/table目录不存在！")
        return None, None
    with open(text_file, "r") as f:
        parsed_result = json.load(f)
    entries = os.listdir(table_dir)
    table_path = [f"./data/{model_id}/table/{name}" for name in entries] if entries else None
    return parsed_result, table_path

def dwonload_text_table(model_id="meta-llama/Llama-3.1-8B-Instruct", new_lines="="*10):
    """Download a model card from hf-mirror.com and persist its text and tables.

    Writes ./data/{model_id}/text.json ({heading: section text}) and one CSV
    per HTML table under ./data/{model_id}/table/ (named by section heading).
    Table extraction is best-effort; failures are logged, not raised.
    NOTE(review): the name keeps the original "dwonload" typo so existing
    callers do not break.

    new_lines: separator joining sub-paragraphs within a section.
    """
    # Already downloaded: nothing to do.
    if os.path.exists(f"./data/{model_id}/text.json") and os.path.exists(f"./data/{model_id}/table"):
        print(f"./data/{model_id}/text.json已存在！")
        print(f"./data/{model_id}/table目录已存在！")
        return

    url = f"https://hf-mirror.com/{model_id}"
    downloaded = fetch_url(url)
    # Extract the page as XML and split the text by headings.
    os.makedirs(f"./data/{model_id}", exist_ok=True)
    result = extract(downloaded, output_format="xml", include_links=True)
    parsed_result, table_result = parse_xml(result, new_lines=new_lines)
    with open(f"./data/{model_id}/text.json", "w") as f:
        json.dump(parsed_result, f, indent=4, ensure_ascii=False)
    # Save each HTML table as a CSV named after its section heading.
    os.makedirs(f"./data/{model_id}/table", exist_ok=True)
    try:
        table_names = list(table_result.keys())
        table_df = pd.read_html(downloaded)
        # Explicit check instead of `assert` (asserts are stripped under -O).
        if len(table_df) != len(table_names):
            raise ValueError(
                f"table count mismatch: {len(table_df)} tables vs {len(table_names)} headings"
            )
        for i, table_name in enumerate(table_names):
            table_df[i].to_csv(f"./data/{model_id}/table/{table_name}.csv", index=False)
    except Exception:
        # Best-effort: was a silent bare `except:`; log instead of hiding.
        log.info("no tables saved for %s", model_id)
        
def download_readme(model_id):
    """Download README.md for `model_id` from hf-mirror.com into ./data/{model_id}/.

    Returns the raw response bytes on success; returns None when the HTTP
    request fails or when the file already exists locally (skipping the
    download in that case).
    """
    target = f"./data/{model_id}/README.md"
    if os.path.exists(target):
        print(f"./data/{model_id}/README.md已存在！")
        return
    # Direct-download URL for the raw README file.
    url = f"https://hf-mirror.com/{model_id}/resolve/main/README.md?download=true"
    response = requests.get(url)
    # Guard clause on HTTP failure.
    if response.status_code != 200:
        print(f"下载失败，状态码：{response.status_code} {model_id}")
        return None
    # Persist the bytes under the model's data directory.
    os.makedirs(f"./data/{model_id}", exist_ok=True)
    with open(target, "wb") as file:
        file.write(response.content)
    return response.content
        
def parse_markdwon(model_id, new_lines = "\n"):
    """Parse a downloaded README.md into {heading: section text}.

    Sections are delimited by '## ' / '### ' headings; each section's value
    is the heading followed by its lines, joined with `new_lines`. A '---'
    line discards the text accumulated so far. Returns None (after printing)
    when the README has not been downloaded.
    NOTE(review): name keeps the original "markdwon" typo for caller compat.
    """
    # TODO: find a better way to parse markdown files
    path = f"./data/{model_id}/README.md"
    if not os.path.exists(path):
        print(f"{path}不存在！")
        return 
    with open(path, 'r') as f:
        lines = f.readlines()
    title_content = {}
    title, content = '', []
    for line in lines:
        if line.startswith(('## ', '### ')):
            # Flush the previous section before starting a new one.
            if title:
                title_content[title] = new_lines.join([title] + content)
                content = []
            # Slice off the marker instead of str.strip('## '): strip treats
            # its argument as a character SET and also removed literal '#'
            # from titles (e.g. "## C#" became "C").
            marker_len = 4 if line.startswith('### ') else 3
            title = line[marker_len:].strip()
        elif line.startswith('---'):
            # Horizontal rule / frontmatter fence: drop accumulated text.
            content = []
        else:
            content.append(line.strip())
    if title:
        title_content[title] = new_lines.join([title] + content)
    return title_content

def get_url_content(model_id, new_lines = "\n"):
    """Collect markdown-link URLs from a downloaded README.md.

    URLs containing 'github.com' go into one bucket, all others into a
    second; each bucket records both the URL and the stripped line it came
    from. Returns (github_url_content, other_url_content), or None (after
    printing) when the README is missing. `new_lines` is unused, kept for
    signature compatibility.
    """
    other_url_content = {
        "url":[],
        "raw_paragraph":[]
    }
    github_url_content = {
        "url":[],
        "raw_paragraph":[]
    }
    path = f"./data/{model_id}/README.md"
    if not os.path.exists(path):
        print(f"{path}不存在！")
        return 
    with open(path, 'r') as f:
        lines = f.readlines()

    # Matches the "(http...)" target of a markdown link.
    pattern = r'\((https?://[^\s]+)\)'
    for line in lines:
        # Fixed: the original also built an unused `urls` list per line.
        for match in re.findall(pattern, line):
            target = github_url_content if 'github.com' in match else other_url_content
            target["raw_paragraph"].append(line.strip())
            target["url"].append(match)

    return github_url_content, other_url_content

def get_url_text(model_id: str, readme: str) -> tuple[dict, dict]:
    """Split markdown-link URLs in a README string into GitHub vs other.

    Each bucket is {"url": [...], "raw_paragraph": [...]} pairing every URL
    with the stripped line it appeared on. `model_id` is unused, kept for
    signature compatibility. Returns (github_bucket, other_bucket).
    """
    # True -> github.com URLs, False -> everything else.
    buckets = {
        True: {"url": [], "raw_paragraph": []},
        False: {"url": [], "raw_paragraph": []},
    }

    # Matches the "(http...)" target of a markdown link.
    link_re = re.compile(r'\((https?://[^\s]+)\)')
    for line in readme.split("\n"):
        for url in link_re.findall(line):
            bucket = buckets['github.com' in url]
            bucket["raw_paragraph"].append(line.strip())
            bucket["url"].append(url)

    return buckets[True], buckets[False]

def get_split_text_from_xml(model_id: str, new_lines: str="\n") -> dict:
    """Fetch a model card from hf-mirror.com and return its text sections.

    new_lines: separator used to join sub-paragraphs (default newline).
    Returns {heading: section text} as produced by parse_xml; tables are
    discarded.
    """
    page = fetch_url(f"https://hf-mirror.com/{model_id}")
    xml_doc = extract(page, output_format="xml", include_links=True)
    text_sections, _ = parse_xml(xml_doc, new_lines=new_lines)
    return text_sections

def markdown_to_plain_text(markdown_text):
    """Strip common Markdown syntax, returning approximate plain text.

    Removes fenced/inline code, images, link targets (keeping link text),
    emphasis, heading/list/quote markers, and horizontal rules, then
    collapses runs of blank lines.
    """
    # Remove fenced code blocks
    markdown_text = re.sub(r"```[\s\S]*?```", "", markdown_text)

    # Remove inline code
    markdown_text = re.sub(r"`[^`\n]+`", "", markdown_text)

    # Remove images BEFORE links: the link rule also matches the
    # "[alt](url)" part of "![alt](url)", which previously left a stray
    # "!alt" and made the image rule below unreachable.
    markdown_text = re.sub(r"!\[([^\]]*)\]\([^\)]+\)", "", markdown_text)

    # Remove links, keeping the link text
    markdown_text = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", markdown_text)

    # Remove emphasis markers (bold, then italic)
    markdown_text = re.sub(r"(\*\*|__)(.*?)\1", r"\2", markdown_text)
    markdown_text = re.sub(r"(\*|_)(.*?)\1", r"\2", markdown_text)

    # Remove heading markers
    markdown_text = re.sub(r"^#+\s*", "", markdown_text, flags=re.MULTILINE)

    # Remove list markers (bulleted and numbered)
    markdown_text = re.sub(r"^\s*[-*+]\s", "", markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r"^\s*\d+\.\s", "", markdown_text, flags=re.MULTILINE)

    # Remove blockquote markers
    markdown_text = re.sub(r"^>\s", "", markdown_text, flags=re.MULTILINE)

    # Remove horizontal rules
    markdown_text = re.sub(r"^[-*_]{3,}\s*$", "", markdown_text, flags=re.MULTILINE)

    # Collapse three or more newlines into a single blank line
    markdown_text = re.sub(r"\n{3,}", "\n\n", markdown_text)

    return markdown_text.strip()

def get_split_text_from_readme(model_id: str, readme: str, new_lines: str="\n") -> dict:
    """Split a README string into plain-text sections keyed by its headings.

    Sections are delimited by '## ' / '### ' headings; each value is the
    heading plus its lines joined with `new_lines` and passed through
    markdown_to_plain_text. A '---' line discards accumulated text.
    `model_id` is unused, kept for signature compatibility.

    new_lines: separator used to join sub-paragraphs (default newline).
    """
    title_content = {}
    # The initial ' ' title is truthy, so any preamble before the first
    # heading is stored under the key ' ' (preserved original behavior).
    title, content = ' ', []
    for line in readme.split("\n"):
        if line.startswith(('## ', '### ')):
            # Flush the previous section before starting a new one.
            if title:
                joined = new_lines.join([title] + content)
                title_content[title] = markdown_to_plain_text(joined)
                content = []
            # Slice off the marker instead of str.strip('### '): strip treats
            # its argument as a character SET and also removed literal '#'
            # from titles (e.g. "## C#" became "C").
            marker_len = 4 if line.startswith('### ') else 3
            title = line[marker_len:].strip()
        elif line.startswith('---'):
            # Horizontal rule / frontmatter fence: drop accumulated text.
            content = []
        else:
            content.append(line.strip())
    if title:
        joined = new_lines.join([title] + content)
        title_content[title] = markdown_to_plain_text(joined)
    return title_content