from typing import List
from bs4 import BeautifulSoup, element, Tag
import os
import re
import requests
import httpx
from langchain_community.document_loaders import WebBaseLoader
from tqdm import tqdm
import pandas as pd
from functools import reduce
import asyncio
import nest_asyncio
nest_asyncio.apply()

# Scrape target: gushiwen.cn's Tang-poetry (唐诗) collection.
domain = "https://so.gushiwen.cn"
# Local cache layout: every downloaded page is saved under these directories
# so re-runs never hit the network again.
root_dir = "tangshi"
shiwen_dir = f"{root_dir}/shiwen"  # poem pages
shiwen_extend_dir = f"{shiwen_dir}/extend"  # AJAX-loaded extra sections
author_dir = f"{root_dir}/author"  # author pages
main_url = f"{domain}/gushi/tangshi.aspx"  # index page listing all poems

# In-memory memoisation caches: url -> parsed object.
loaded_poetries = {}
loaded_authors = {}
class ExtendItem:
    """One piece of supplementary material (translation, appreciation,
    background notes, ...) attached to a poem or an author."""

    def __init__(self, tag: str):
        # category label, e.g. the <h2> heading text of the section
        self.tag = tag
        # flattened text of the section; filled in via setContent()
        self.content = ""

    def __str__(self) -> str:
        return f"{self.tag}:{self.content}"

    def setContent(self, content: str):
        self.content = content


class Author:
    """An author record: name, dynasty, short introduction, plus any extra
    sections scraped from the author's page."""

    def __init__(self):
        self.name = ""
        self.dynasty = ""
        self.intro = ""
        self.extends: List[ExtendItem] = []

    def __str__(self) -> str:
        return f"{self.name}-{self.dynasty} 扩展{len(self.extends)}"

    def setName(self, name: str):
        self.name = name

    def setDynasty(self, dynasty: str):
        self.dynasty = dynasty

    def setIntro(self, intro: str):
        self.intro = intro

    def appendExtend(self, ext: ExtendItem):
        self.extends.append(ext)

    def extendExtend(self, exts: List[ExtendItem]):
        self.extends.extend(exts)

    def as_node(self):
        """Flatten this author into a dict suitable for a CSV row."""
        node = {"姓名": self.name, "朝代": self.dynasty, "简介": self.intro}
        node["扩展"] = "".join(str(item) for item in self.extends)
        return node
        

class Poetry:
    """A poem: title, author, body text, plus any scraped extra sections."""

    def __init__(self):
        self.title = ""
        self.author: Author | None = None
        self.content = ""
        self.extends: List[ExtendItem] = []

    def __str__(self) -> str:
        return f"《{self.title}》 作者 {self.author.name} {self.author.dynasty}  字数{self.countWord()} 扩展{len(self.extends)}\n {self.content}"

    def setTitle(self, title: str):
        self.title = title

    def setAuthor(self, author: Author | None):
        self.author = author

    def setContent(self, content: str):
        self.content = content

    def appendExtend(self, ext: ExtendItem):
        self.extends.append(ext)

    def extendExtend(self, exts: List[ExtendItem]):
        self.extends.extend(exts)

    def countWord(self):
        """Character count of the poem body."""
        return len(self.content)

    def as_node(self):
        """Flatten this poem into a dict suitable for a CSV row."""
        node = {"标题": self.title, "作者": self.author.name, "正文": self.content}
        node["扩展"] = "".join(str(item) for item in self.extends)
        return node
    

def remove_empty(v: str) -> str:
    """Strip every whitespace character (spaces, tabs, newlines, full-width
    spaces, ...) from *v*."""
    # raw string: "\s" is an invalid escape sequence (DeprecationWarning,
    # a SyntaxError in future Python versions)
    return re.sub(r"\s", "", v)

def get_html(save_path: str, url: str) -> str:
    """Return the HTML at *url*, caching it in the file *save_path*.

    If the cache file already exists its contents are returned and no request
    is made; otherwise the page is fetched, saved (when *save_path* is
    non-empty) and returned. Returns "" on any request failure.
    """
    dir_name = os.path.dirname(save_path)
    if dir_name:  # os.makedirs("") raises FileNotFoundError
        os.makedirs(dir_name, mode=0o777, exist_ok=True)
    if os.path.isfile(save_path):
        # cache hit: serve the local copy
        with open(save_path, "rt", encoding="utf-8") as file:
            return file.read()
    print(f"=DEBUG= local file <{save_path}> not exist ,remote request")
    try:
        r = requests.get(url)
        r.raise_for_status()
        # the site does not always declare a charset; guess from the content
        r.encoding = r.apparent_encoding
        if save_path != "":
            # explicit encoding so the cache round-trips on every platform
            with open(save_path, "w", encoding="utf-8") as file:
                file.write(r.text)
        return r.text
    except Exception as e:
        print("=DEBUG= request error:", e)
        return ""

async def aget_html(cli: httpx.AsyncClient, save_path: str, url: str) -> str:
    """Async variant of get_html: return the HTML at *url*, caching it in
    *save_path*.

    Cache hits never touch *cli*. Returns "" on any request failure.
    """
    dir_name = os.path.dirname(save_path)
    if dir_name:  # os.makedirs("") raises FileNotFoundError
        os.makedirs(dir_name, mode=0o777, exist_ok=True)
    if os.path.isfile(save_path):
        # cache hit: serve the local copy
        with open(save_path, "rt", encoding="utf-8") as file:
            return file.read()
    print(f"=DEBUG= local file <{save_path}> not exist ,remote request")
    try:
        r = await cli.get(url)
        # consistent with get_html: treat HTTP error statuses as failures
        r.raise_for_status()
        if save_path != "":
            # explicit encoding so the cache round-trips on every platform
            with open(save_path, "w", encoding="utf-8") as file:
                file.write(r.text)
        return r.text
    except Exception as e:
        print("=DEBUG= request error:", e)
        return ""

def parse_index_html(html: str, selector: str):
    """Extract (link text, absolute URL) pairs for every anchor matched by
    *selector* in the index page *html*."""
    soup = BeautifulSoup(html, "html.parser")
    pairs = []
    for anchor in soup.select(selector):
        pairs.append((anchor.text, f"{domain}{anchor.attrs['href']}"))
    return pairs


def format_remote_extend_url(raw_url: str) -> str:
    """Translate an inline onclick-style link such as "fanyiShow(123,'ABC')"
    into the AJAX endpoint URL that actually serves the content.

    Unrecognized or malformed links are returned unchanged (the original
    code crashed on a keyword match with a malformed argument list).
    """
    # keyword -> endpoint template; {0} is the numeric id, {1} the idjm token
    endpoints = {
        "fanyiShow": f"{domain}/nocdn/ajaxfanyi.aspx?id={{0}}&idjm={{1}}",
        "shangxiShow": f"{domain}/nocdn/ajaxshangxi.aspx?id={{0}}&idjm={{1}}",
        # NOTE(review): the original code passes the idjm token as the `id`
        # query parameter here — preserved as-is; verify against the site API.
        "ziliaoShow": f"{domain}/authors/ajaxziliao.aspx?id={{1}}",
    }
    for keyword, template in endpoints.items():
        if keyword in raw_url:
            m = re.search(rf"{keyword}\((\d+),\W*'(\w+)'\)", raw_url)
            if m:
                return template.format(m[1], m[2])
            break  # keyword present but arguments malformed: pass through
    return raw_url

def extract_local_extend(el: Tag, from_remote: bool = False):
    """Build an ExtendItem from a section whose content is embedded in *el*.

    The <h2> heading becomes the tag; the text of every <p> plus every
    ".cankao" element, whitespace-stripped, becomes the content. Returns
    None when the section has no <h2>. (*from_remote* is currently unused
    but kept for callers that pass it.)
    """
    heading = el.find("h2")
    if not heading:
        return None
    pieces = [p.text for p in el.find_all("p")]
    pieces.extend(c.text for c in el.select(".cankao"))
    item = ExtendItem(heading.text)
    item.setContent(remove_empty("".join(pieces)))
    return item

def parse_author_extend(el: Tag):
    """Collect the extra sections (biography, anecdotes, ...) that follow an
    author's intro block *el*.

    Walks *el*'s following siblings until the next "title" block. Sections
    of class "sons" either embed their text locally or reference an AJAX
    endpoint via an inline ...Show(id,'idjm') link; remote ones are fetched
    concurrently (nest_asyncio allows re-entering the running event loop)
    and then parsed like local ones. Returns a list of ExtendItem.
    """
    _remote_extend_urls = set()
    author_extends = []
    next_sibling = el.next_sibling
    while next_sibling:
        if type(next_sibling) == Tag:
            classVal = (
                next_sibling.attrs["class"] if next_sibling.has_attr("class") else ""
            )
            styleVal = (
                next_sibling.attrs["style"] if next_sibling.has_attr("style") else ""
            )
            if "title" in classVal:
                # start of the next top-level section: stop collecting
                break
            if "display:none;" in styleVal:
                # hidden block: skip it entirely
                next_sibling = next_sibling.next_sibling
                continue
            if "sons" in classVal:
                remote_extend = next_sibling.select_one("[href*='Show(']")
                if remote_extend:
                    # content lives behind an AJAX call; queue it for fetching
                    formatted_url = format_remote_extend_url(remote_extend.attrs["href"])
                    _remote_extend_urls.add(formatted_url)
                else:
                    # content is embedded directly in the page
                    extend = extract_local_extend(next_sibling)
                    if extend:
                        author_extends.append(extend)
        next_sibling = next_sibling.next_sibling
    
    if len(_remote_extend_urls) > 0:
        # fetch all remote sections concurrently, caching each one under
        # shiwen_extend_dir so re-runs are offline
        loop = asyncio.get_event_loop()
        cli = httpx.AsyncClient() 
        bg_tasks = [
            loop.create_task(aget_html(cli, f"{shiwen_extend_dir}/{os.path.basename(url)}.html", url))
            for url in _remote_extend_urls
        ]
        task = asyncio.gather(*bg_tasks)  # , loop=loop)
        result = loop.run_until_complete(task)
        for seg in result:
            # remote fragments have the same <h2>/<p> shape as local ones
            node = BeautifulSoup(seg, "html.parser")
            extend = extract_local_extend(node, True)
            if extend:
                author_extends.append(extend)
        
    return author_extends

def parse_author(el: Tag):
    """Parse an author's #sonsyuanwen element into an Author.

    The dynasty is not available here; the poem parser fills it in later.
    """
    author = Author()
    author.setName(remove_empty(el.select_one("h1").text.replace("\n", "")))
    author.setIntro(remove_empty(el.select_one(".cont p").text.replace("\n", "")))
    author.extendExtend(parse_author_extend(el))
    return author

def parse_poetry_extend(el: Tag):
    """Collect the extra sections (translation, appreciation, background)
    that follow a poem's original-text block *el*.

    Walks *el*'s following siblings until the author card ("sonspic").
    Sections of class "sons" either embed their text locally or reference an
    AJAX endpoint via an inline ...Show(id,'idjm') link; remote ones are
    fetched concurrently (nest_asyncio allows re-entering the running event
    loop) and then parsed like local ones. Returns a list of ExtendItem.
    """
    _remote_extend_urls = set()
    poetry_extends = []
    next_sibling = el.next_sibling
    while next_sibling:
        if type(next_sibling) == Tag:
            classVal = (
                next_sibling.attrs["class"] if next_sibling.has_attr("class") else ""
            )
            if "sonspic" in classVal:
                # the author card marks the end of the poem's own sections
                break
            if "sons" in classVal:
                remote_extend = next_sibling.select_one("[href*='Show(']")
                if remote_extend:
                    # content lives behind an AJAX call; queue it for fetching
                    formatted_url = format_remote_extend_url(remote_extend.attrs["href"])
                    _remote_extend_urls.add(formatted_url)
                else:
                    # content is embedded directly in the page
                    extend = extract_local_extend(next_sibling)
                    if extend:
                        poetry_extends.append(extend)
        next_sibling = next_sibling.next_sibling
    
    if len(_remote_extend_urls) > 0:
        # fetch all remote sections concurrently, caching each one under
        # shiwen_extend_dir so re-runs are offline
        loop = asyncio.get_event_loop()
        cli = httpx.AsyncClient() 
        bg_tasks = [
            loop.create_task(aget_html(cli, f"{shiwen_extend_dir}/{os.path.basename(url)}.html", url))
            for url in _remote_extend_urls
        ]
        task = asyncio.gather(*bg_tasks)  # , loop=loop)
        result = loop.run_until_complete(task)
        for seg in result:
            # remote fragments have the same <h2>/<p> shape as local ones
            node = BeautifulSoup(seg, "html.parser")
            extend = extract_local_extend(node, True)
            if extend:
                poetry_extends.append(extend)
        
    return poetry_extends


def parse_poetry(yuanwen: Tag):
    """Parse a poem's #sonsyuanwen element into a Poetry.

    Also resolves the author's page (memoised in the module-level
    loaded_authors cache keyed by URL) and fills in the author's dynasty
    from the poem page. Returns an empty Poetry when *yuanwen* is None.
    """
    poetry = Poetry()

    if yuanwen is not None:  # idiomatic None test (was `!= None`)
        poetry.setTitle(yuanwen.select_one("h1").text)
        a_author_name = yuanwen.select_one(".cont .source a[href^='/authorv_']")
        # normalise a relative author link into an absolute URL
        author_url = (
            a_author_name.attrs["href"]
            if a_author_name.attrs["href"].startswith("https://")
            else f"{domain}{a_author_name.attrs['href']}"
        )
        if author_url in loaded_authors:
            author = loaded_authors[author_url]
        else:
            author_html = get_html(f"{author_dir}/{os.path.basename(author_url)}.html", author_url)
            author = parse_author_html(author_html)
            loaded_authors[author_url] = author

        # the dynasty is only shown next to the author link on the poem page
        dynasty = yuanwen.select_one(".cont .source a[href*='?cstr=']").text
        author.setDynasty(dynasty)
        poetry.setAuthor(author)

        div_poetry_content = yuanwen.select_one(".cont .contson")
        # .text already strips tags; the replace is a defensive no-op kept as-is
        poetry_content = remove_empty(div_poetry_content.text.replace("<br/>", ""))
        poetry.setContent(poetry_content)
        poetry.extendExtend(parse_poetry_extend(yuanwen))
    return poetry

def parse_author_html(html: str):
    """Parse a cached author page into an Author."""
    soup = BeautifulSoup(html, "html.parser")
    return parse_author(soup.select_one("#sonsyuanwen"))

def parse_poetry_html(html: str):
    """Parse a cached poem page into a Poetry."""
    soup = BeautifulSoup(html, "html.parser")
    return parse_poetry(soup.select_one("#sonsyuanwen"))


def load_poetries(poetries):
    """Fetch and parse every (title, url) pair, memoising each result in the
    module-level loaded_poetries cache (keyed by URL)."""
    for title, url in tqdm(poetries):
        if url in loaded_poetries:
            continue  # already parsed earlier in this run
        page = get_html(f"{shiwen_dir}/{os.path.basename(url)}.html", url)
        loaded_poetries[url] = parse_poetry_html(page)
    print(f"loaded_poetries:{len(loaded_poetries)}")
    print(f"loaded_authors:{len(loaded_authors)}")

def save_to_csv():
    """Dump the author and poetry caches to CSV under root_dir.

    quoting=1 is csv.QUOTE_ALL — every field is quoted, which keeps the
    comma-rich Chinese text unambiguous.
    """
    # iterate .values() directly instead of key-then-index lookups
    df_authors = pd.DataFrame([a.as_node() for a in loaded_authors.values()])
    df_authors.to_csv(f"{root_dir}/tangshi-author.csv", quoting=1, index=False)

    df_poetries = pd.DataFrame([p.as_node() for p in loaded_poetries.values()])
    df_poetries.to_csv(f"{root_dir}/tangshi-poetry.csv", quoting=1, index=False)
    
if __name__ == "__main__":
    # 1. fetch the Tang-poetry index page (cached locally)
    html = get_html(f"{root_dir}/tangshi-index.html", main_url)
    # 2. extract a (title, url) pair for every poem link in the index
    poetries = parse_index_html(html, ".left .sons a")
    # 3. download and parse every poem (and its author), then export to CSV
    load_poetries(poetries)
    save_to_csv()
