# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests

from db.poety import Poety
from db.todo import TodoUrl
from db.finish import FinishUrl

# Root URL of the poetry site being crawled.
base_url = 'https://www.gushiwen.org'
# seeds = ['/shiwen/default_0A0A1.aspx',]
# First listing page; the crawl starts here whenever the todo queue is empty.
seed_url = '{}/shiwen/default_0A0A1.aspx'.format(base_url)


def parse_left(html):
    """Parse *html* and return the page's left-hand content column.

    The site wraps the listing in <div class="main3"><div class="left">;
    this returns the inner "left" element.
    """
    document = BeautifulSoup(html, 'html5lib')
    return document.find('div', class_='main3').find('div', class_='left')


def parse_sons_list(left):
    """Return every poem container (<div class="cont">) under *left*."""
    return left.find_all('div', class_='cont')


def parse_pages(left):
    """Scan pagination links under *left* and queue unseen listing pages.

    For every <a> in the <div class="pages"> element, build the absolute
    URL and add it to the todo queue unless it was already crawled
    (FinishUrl) or is already queued (TodoUrl).

    Returns the pagination <div class="pages"> element.
    """
    pages = left.find('div', class_='pages')
    for a in pages.find_all('a'):
        # Absolute URL of a listing page that may still need a visit.
        url = "{}{}".format(base_url, a['href'])
        print(url)
        todo = TodoUrl(url)
        # Queue only URLs that are neither finished nor already queued.
        if not FinishUrl(url).existUrl() and not todo.existUrl():
            print('save to todo url list')
            todo.save()
    return pages


def parse_cont(cont_list):
    """Extract title, dynasty, author and body from each poem container.

    Saves a Poety record for every container that has both a title and a
    body; containers missing either are skipped. Dynasty/author fall back
    to '未知' ("unknown") when the source paragraph or its links are absent.
    """
    for cont in cont_list:
        # Poem title (first <p> in the container).
        title = cont.find('p')
        # Dynasty and author live in the "source" paragraph.
        p_source = cont.find('p', class_='source')
        dynasty = '未知'
        author = '未知'
        if p_source is not None:
            a_list = p_source.find_all('a')
            # Guard against pages that omit the dynasty or author link;
            # the original code raised IndexError on fewer than two <a>s.
            if len(a_list) > 0:
                dynasty = a_list[0].text
            if len(a_list) > 1:
                author = a_list[1].text

        # Poem body.
        content = cont.find('div', class_='contson')

        if title is not None and content is not None:
            poety = Poety(title.text, dynasty, author, content.text)
            poety.save()
            print(poety)


def main():
    """Drive the crawl: drain the todo queue, seeding it on the first run.

    Each fetched listing page is parsed once; its poems are stored and its
    pagination links are queued. A URL is marked finished only after a
    successful (HTTP 200) fetch. Terminates when the queue is empty and
    the seed page has already been crawled — the original version spun in
    a busy loop forever at that point.
    """
    while True:
        todo_urls = TodoUrl.findUrlList()
        if todo_urls:
            for url in todo_urls:
                print(url['url'])
                # Timeout prevents the crawler hanging on a dead connection.
                r = requests.get(url['url'], timeout=30)
                if r.status_code == 200:
                    # Parse the document once and reuse the tree for both
                    # the poem extraction and the pagination scan.
                    left = parse_left(r.text)
                    parse_cont(parse_sons_list(left))
                    parse_pages(left)
                    TodoUrl.removeUrl(url['id'])
                    FinishUrl(url['url']).save()
        else:
            seed = FinishUrl(seed_url)
            if seed.existUrl():
                # Queue empty and seed already crawled: crawl complete.
                break
            r = requests.get(seed_url, timeout=30)
            if r.status_code == 200:
                left = parse_left(r.text)
                parse_cont(parse_sons_list(left))
                parse_pages(left)
                seed.save()


# Script entry point: run the crawler when executed directly.
if __name__ == '__main__':
    main()
