import re
import json

import requests
from bs4 import BeautifulSoup

import database
from entities import *


def text_with_newlines(elem):
    """Flatten *elem* into plain text, turning <br> and <p> tags into newlines.

    Walks every descendant node of the BeautifulSoup element: string nodes
    contribute their stripped text, while <br>/<p> elements each contribute
    a single line break.
    """
    pieces = []
    for node in elem.descendants:
        if isinstance(node, str):
            pieces.append(node.strip())
        elif node.name in ('br', 'p'):
            pieces.append('\n')
    return ''.join(pieces)

def _expand_url(js: str, endpoint: str) -> str:
    """Resolve a "展开阅读全文 ∨" javascript link to its AJAX fetch URL.

    *js* looks like ``javascript:fanyiShow(1637,'609C68A9E4B69D12')``; the
    numeric id and the hex token are spliced into the given *endpoint*
    (``ajaxfanyi`` or ``ajaxshangxi``), e.g.
    ``https://so.gushiwen.cn/nocdn/ajaxfanyi.aspx?id=1637&idjm=609C68A9E4B69D12``.
    """
    poem_id = js[js.index('(') + 1:js.index(',')]
    poem_idjm = js[js.index(',') + 2:-2]
    return f'https://so.gushiwen.cn/nocdn/{endpoint}.aspx?id={poem_id}&idjm={poem_idjm}'


def _get_translation_annotation(soup):
    """Extract the 译文 (translation) and 注释 (annotation) sections.

    Returns a ``(translation, annotation)`` tuple; either element is None
    when the corresponding section is absent.  When the page truncates the
    section behind a "展开阅读全文 ∨" link, the full text is fetched via
    the AJAX endpoint instead.
    """
    heading = soup.find('h2', text='译文及注释')
    if heading is None:
        return None, None
    section = heading.find_parent('div').find_parent('div')
    expand_a = section.find('a', text='展开阅读全文 ∨')
    if expand_a:
        html = requests.get(_expand_url(expand_a['href'], 'ajaxfanyi')).text
        section_soup = BeautifulSoup(html, 'html.parser')
    else:
        section_soup = soup
    # 译文: label node -> grandparent holds the full paragraph text.
    trans_node = section_soup.find(text=re.compile(r'\s*译文\s*$'))
    if trans_node:
        translation = trans_node.parent.parent.get_text(separator='\n', strip=True)
        # Drop the leading label and the trailing ▲ collapse marker.
        translation = translation.replace('译文', '', 1).replace('▲', '', 1).strip()
    else:
        translation = None
    anno_node = section_soup.find(text='注释')
    if anno_node:
        annotation = anno_node.parent.parent.get_text(separator='\n', strip=True)
        annotation = annotation.replace('注释', '', 1).replace('▲', '', 1).strip()
    else:
        annotation = None
    return translation, annotation


def _get_appreciation(soup):
    """Extract the appreciation section, or None when the page has none.

    The section heading varies between pages: 赏析, 鉴赏 or 简析 — the
    first one present wins.  Truncated sections are fetched in full via
    the ajaxshangxi endpoint.
    """
    section = None
    for heading_text in ('赏析', '鉴赏', '简析'):
        heading = soup.find('h2', text=heading_text)
        if heading is not None:
            section = heading.find_parent('div').find_parent('div')
            break
    if section is None:
        return None
    expand_a = section.find('a', text='展开阅读全文 ∨')
    if expand_a:
        html = requests.get(_expand_url(expand_a['href'], 'ajaxshangxi')).text
        appreciation = text_with_newlines(BeautifulSoup(html, 'html.parser').div)
    else:
        appreciation = section.get_text(separator='\n', strip=True)
    # Drop the leading label and the trailing ▲ collapse marker.
    appreciation = appreciation.replace('赏析', '', 1).strip()
    return appreciation.replace('▲', '', 1).strip()


def get_poem(url: str) -> Poem:
    """Scrape one poem page from gushiwen.cn and persist it.

    Downloads the page at *url*, extracts title / author / dynasty /
    content plus the optional translation, annotation and appreciation
    sections, creates the author record if it does not exist yet, and
    stores the poem — unless a poem with the same title and author is
    already in the database, in which case the existing row is returned.
    """
    response = requests.get(url)
    response.encoding = 'utf-8'  # pages are UTF-8 but may lack the header
    soup = BeautifulSoup(response.text, 'html.parser')

    # The <p class="source"> element holds the author link and the dynasty.
    source_p = soup.find('p', class_='source')
    title = soup.find('div', class_='contson').find_previous('h1').text.strip()
    author_name = source_p.a.text.strip()
    author_url = 'https://so.gushiwen.cn' + source_p.a['href']
    # Dynasty is the second link, wrapped in 〔…〕 brackets.
    dynasty = source_p.find_all('a')[1].text.strip().replace('〔', '').replace('〕', '')
    # Poem body: text of #sonsyuanwen .contson, one line per verse.
    content = soup.find('div', class_='contson').get_text(separator='\n', strip=True)

    translation, annotation = _get_translation_annotation(soup)
    appreciation = _get_appreciation(soup)

    author = database.db.read_author_by_name(author_name)
    if not author:
        author = Author(name=author_name, dynasty=dynasty, desc=get_author_desc(author_url))
        author = database.db.create_author(author)

    # Skip poems already stored for the same author.
    ori_poem = database.db.read_poem_by_title(title)
    if ori_poem and ori_poem.author_id == author.author_id:
        print(f'Poem {title} already exists in database')
        return ori_poem
    poem = Poem(
        title=title,
        author_id=author.author_id,
        content=content,
        translation=translation,
        annotation=annotation,
        appreciation=appreciation
    )
    poem = database.db.create_poem(poem)

    return poem


def get_author_desc(url: str) -> str:
    """Fetch an author's page and return their description paragraph.

    Returns ``None`` when the page lacks the expected ``#sonsyuanwen``
    structure (NOTE: the ``-> str`` annotation is kept for interface
    compatibility even though ``None`` is a possible return value).
    """
    response = requests.get(url)
    response.encoding = 'utf-8'  # pages are UTF-8 but may lack the header
    soup = BeautifulSoup(response.text, 'html.parser')

    try:
        container = soup.find(id='sonsyuanwen').find('div', class_='cont')
        desc = container.p.get_text(separator='\n', strip=True)
    except AttributeError:
        # A .find() returned None or <p> is missing: page has no description.
        return None
    # Strip the trailing statistics line, e.g. "► 31篇诗文　► 1396条名句".
    return re.sub(r'► \d+篇诗文\s*► \d+条名句', '', desc)

if __name__ == '__main__':
    # Fetch the same poem twice: the second call exercises the
    # duplicate check and returns the already-stored row.
    demo_url = 'https://so.gushiwen.cn/shiwenv_f5714bcd33e3.aspx'
    for _ in range(2):
        poem = get_poem(demo_url)
    print(poem)