#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :gushiwen.py
# @Time      :2024/12/20 
# @Author    :CL
# @email     :1037654919@qq.com
from bs4 import BeautifulSoup
import requests
import re

from retrying import retry
from utils import mongo_manager
# Mongo collection handle used to persist scraped poem documents
# (project-local helper; connection details live in utils.mongo_manager).
gushiwen_poetry_content = mongo_manager(collect_name='gushiwen_poetry_content',db='cl_data')

def convert_pinyin_to_numeric(pinyin):
    """Convert one tone-marked pinyin syllable to numeric-tone form.

    The first tone-marked vowel found is replaced by its unmarked base
    letter ('ü' family becomes 'u:') and the tone digit 1-4 is appended
    to the syllable. A syllable with no tone mark gets the neutral-tone
    digit '5' appended.

    e.g. 'hǎo' -> 'hao3', 'lǜ' -> 'lu:4', 'ma' -> 'ma5'
    """
    # Build both lookup tables from one compact description: each row
    # holds tones 1-4 of a single base vowel.
    toned_rows = ('āáǎà', 'ōóǒò', 'ēéěè', 'īíǐì', 'ūúǔù', 'ǖǘǚǜ')
    base_vowels = ('a', 'o', 'e', 'i', 'u', 'u:')

    flat = {}   # tone-marked letter -> unmarked base letter
    tone = {}   # tone-marked letter -> tone digit as a string
    for row, base in zip(toned_rows, base_vowels):
        for digit, marked in enumerate(row, start=1):
            flat[marked] = base
            tone[marked] = str(digit)

    # Replace the first tone-marked character and append its digit.
    for ch in pinyin:
        if ch in tone:
            return pinyin.replace(ch, flat[ch]) + tone[ch]

    # No tone mark anywhere: neutral tone.
    return pinyin + '5'
class Gushiwen:
    """Scraper for https://www.gushiwen.cn poem pages.

    Fetches category listings, per-poem detail pages, and the pinyin
    transcription endpoint, using fixed browser headers and session
    cookies captured from a logged-in browser session.
    """

    # Seconds before any HTTP request is abandoned.  Without a timeout a
    # stalled connection would hang the crawl indefinitely.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "priority": "u=0, i",
            "referer": "https://www.gushiwen.cn/gushi/songsan.aspx",
            "sec-ch-ua": "\"Chromium\";v=\"130\", \"Google Chrome\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Linux\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
        }
        # NOTE(review): these cookie values were captured from a real
        # session (the "login" value "flase" looks like a typo for
        # "false" but is sent verbatim — do not "correct" it without
        # verifying what the server expects).  They will expire; refresh
        # them from a browser when requests start failing.
        self.cookies = {
            "login": "flase",
            "Hm_lvt_9007fab6814e892d3020a64454da5a55": "1734680938",
            "HMACCOUNT": "8A7B5B515FA69801",
            "ticketStr": "206159309%7cgQFF8DwAAAAAAAAAAS5odHRwOi8vd2VpeGluLnFxLmNvbS9xLzAyUW1abVJPbGVkN2kxR1hLYzFEMWkAAgS7IWVnAwQAjScA",
            "ASP.NET_SessionId": "z0zbfdmb4hddanc1o2oyfyzw",
            "codeyz": "f2925faf53ae2e8e",
            "gsw2017user": "4195998%7cC2445FE659F13C4CD84005FEFB503CAF%7c2000%2f1%2f1%7c2000%2f1%2f1",
            "wxopenid": "defoaltid",
            "gswZhanghao": "15801366532",
            "gswPhone": "15801366532",
            "userPlay": "4195998%7C0%7C7",
            "Hm_lpvt_9007fab6814e892d3020a64454da5a55": "1734682882"
        }

    # Collect the keyword links for poem type / author / dynasty / form.
    def get_category(self):
        """Fetch the category index page and return its link groups.

        Returns:
            dict mapping label ('类型', '作者', '朝代', '形式',
            '类型_重定向') to a list of BeautifulSoup <a> tags, or
            None implicitly when the page does not answer HTTP 200.
        """
        url = "https://www.gushiwen.cn/shiwens/"
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, "html.parser")
            category_list = soup.find("div", class_="titletype").find_all('div', class_='sright')
            labels = ['类型', '作者', '朝代', '形式', '类型_重定向']
            category_list_url = {}
            # Equivalent XPath for this element: //*[@id="right1"]/div[2]
            category_new_url = soup.find("div", class_="sons", id='right1').find_all('a')

            for i, category in enumerate(category_list):
                category_list_url[labels[i]] = category.find_all('a')
            category_list_url['类型_重定向'] = category_new_url
            return category_list_url

    # Links are built slightly differently per form.  Some "type" URLs
    # redirect; when such a page fails to parse, give it up and fall back
    # to URLs of the form https://www.gushiwen.cn/gushi/shijing.aspx,
    # which are parsed with get_poetry.

    # Collect every poem link listed on one category page.
    def get_poetry(self, url='https://www.gushiwen.cn/gushi/songci.aspx'):
        """Return all <a> tags inside the first div.sons of *url*.

        Note: not every returned anchor carries an href attribute —
        callers must filter.
        """
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)

        print(response.url,response)
        html = response.text
        soup = BeautifulSoup(html, 'html.parser')
        poetry_list = soup.find('div', class_='sons').find_all('a')
        return poetry_list

    # Collect poems from the search listing; the search-word list itself
    # comes from page parsing.
    def get_poetry_by_search(self, url = 'https://www.gushiwen.cn/shiwens/default.aspx?page=2&tstr=%e5%a4%8f%e5%a4%a9&astr=&cstr=&xstr='):
        '''
        :param url: e.g. 'https://www.gushiwen.cn/shiwens/default.aspx?page=2&tstr=%e5%a4%8f%e5%a4%a9&astr=&cstr=&xstr='
        URL parameters:
        page: page number
        tstr: type
        astr: author
        cstr: dynasty
        xstr: form
        :return: list of BeautifulSoup <a> tags (one per poem), or None
                 when the page layout is missing / the request failed.
        '''

        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)
        print(response.url,response)

        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            try:
                yizhus = soup.find('div', id='leftZhankai').find_all('div', id=re.compile('^zhengwen.+'))
                # First anchor in each zhengwen div is the poem link.
                return [poetry.find('a') for poetry in yizhus]
            except AttributeError:
                # find() returned None -> layout not present on this page.
                return None

    # Fetch one poem page.  Retry up to 5 times, 2 s between attempts.
    @retry(stop_max_attempt_number=5, wait_fixed=2000)
    def get_poetry_content(self, url="https://www.gushiwen.cn/shiwenv_8381592e4a1e.aspx"):
        """Scrape a poem detail page.

        Returns:
            dict with url/_id/title/author/chaodai/content/poetry_id,
            or None when the expected markup is absent or the request
            does not answer HTTP 200 (implicit None).
        """
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                timeout=self.REQUEST_TIMEOUT)
        print(response.url,response)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            sons = soup.find('div', class_='sons')
            # No div.sons means the page layout is unexpected: give up.
            if sons is None:
                return None

            # The poem body lives in a div whose id starts with "zhengwen".
            div = sons.find('div', id=re.compile('^zhengwen.+'))

            title = div.find('h1').text
            author = div.find('p', class_='source').find('a').text.strip()
            chaodai = div.find('p', class_='source').find_all('a')[-1].text
            content = div.find('div', class_='contson').text.strip()

            data = soup.find('div', class_='yizhu')

            # The site-internal poem id is the last argument of the
            # onclick handler on the "背诵" (recite) button image.
            id_str = data.find('img', alt='背诵')['onclick']
            poetry_id = str(id_str).split(',')[-1].replace(')', '').replace("'", '')
            print(poetry_id)
            poetry_content = {'url':url,'_id':url,
                'title':title, 'author':author, 'chaodai':chaodai,'content':content,'poetry_id':poetry_id}

            return poetry_content

    # Fetch a poem's content with pinyin annotations.
    # Fix: the bare @retry retried FOREVER with no delay on any error;
    # bound it like get_poetry_content for consistency.
    @retry(stop_max_attempt_number=5, wait_fixed=2000)
    def get_poetry_content_pinyin(self, url="https://www.gushiwen.cn/nocdn/ajaxshiwencont230427.aspx", params=None):
        """Query the pinyin AJAX endpoint for one poem.

        :param url: endpoint URL (normally left at the default)
        :param params: dict with the poem 'id' (from the poem page, see
                       get_poetry_content) and 'value': 'yin'
        :return: list of (numeric_pinyin, hanzi) pairs covering
                 《title》 author and body, or None implicitly on a
                 non-200 response.
        """
        if params is None:
            params = {
                "id": "C1C851F324E39398B274E5C605936197", # poem id, taken from the poem page URL
                "value": "yin"
            }
        response = requests.get(url, headers=self.headers, cookies=self.cookies,
                                params=params, timeout=self.REQUEST_TIMEOUT)

        print(response.url,response)
        if response.status_code == 200:
            html = response.text
            soup = BeautifulSoup(html, 'html.parser')
            # Layout: first <p> is the title, last <p> the author, and
            # div.pinyinContson holds the body; each carries paired
            # span.pinyin / span.hanzi elements.
            title = soup.find_all('p')[0]
            author = soup.find_all('p')[-1]
            zhengwen = soup.find('div', class_='pinyinContson')
            # Opening book-title mark 《 paired with ideographic space + neutral tone.
            pinyin_list = ['\u30005']
            hanzi = ['《']
            # Title characters.
            pinyin_list += [convert_pinyin_to_numeric(shici.text) for shici in title.find_all('span', class_='pinyin')]
            hanzi += [shici.text for shici in title.find_all('span', class_='hanzi')]
            # Closing book-title mark 》.
            pinyin_list += ['\u30005']
            hanzi += ['》']
            # Author name.
            pinyin_list += [convert_pinyin_to_numeric(shici.text) for shici in author.find_all('span', class_='pinyin')]
            hanzi += [shici.text for shici in author.find_all('span', class_='hanzi')]
            # Poem body.
            pinyin_list += [convert_pinyin_to_numeric(shici.text) for shici in zhengwen.find_all('span', class_='pinyin')]
            hanzi += [shici.text for shici in zhengwen.find_all('span', class_='hanzi')]
            return list(zip(pinyin_list, hanzi))

# Module-level scraper instance shared by main() and main2().
gushiwen = Gushiwen()
# Category-based download.
def main():
    """Crawl every poem under each redirected "type" category and save it.

    Uses the module-level `gushiwen` scraper and writes each poem (with
    its pinyin transcription when available) to `gushiwen_poetry_content`.
    """
    categories = gushiwen.get_category()['类型_重定向']

    # Show the first ten categories for a quick sanity check; the crawl
    # itself starts at index 10 (the leading entries are skipped —
    # presumably handled in an earlier run; confirm before changing).
    print(categories[:10])
    for category in categories[10:]:
        listing_url = 'https://www.gushiwen.cn' + category['href']
        poems = gushiwen.get_poetry(listing_url)
        print(listing_url, category.text, len(poems))
        for anchor in poems:
            # Some anchors carry no link target — skip them.
            if 'href' not in anchor.attrs:
                continue
            poem_url = 'https://www.gushiwen.cn' + anchor['href']
            print(poem_url, anchor.text)
            # A seed record could be stored here for database upkeep.

            record = gushiwen.get_poetry_content(poem_url)
            if not record:
                continue
            # Fetch the pinyin list (tones as digits) keyed by poem id.
            pinyin = gushiwen.get_poetry_content_pinyin(
                params={"id": record['poetry_id'], "value": "yin"})
            if pinyin:
                record['pinyin_content'] = pinyin
            # Persist; duplicates raise on the _id index, just log them.
            try:
                gushiwen_poetry_content.insertOne(record)
            except Exception as e:
                print(e)
# Search-based download.
def main2():  # downloads classical prose
    """Crawl poems through the paginated search listings of every filter.

    Iterates the four searchable filter groups (type / author / dynasty /
    form), walks up to 19 result pages per filter, and stores each poem
    (plus pinyin when available) in `gushiwen_poetry_content`.
    """
    categories = gushiwen.get_category()
    # '类型_重定向' is excluded here — it is handled by main().
    for label in ('类型', '作者', '朝代', '形式'):
        for category in categories[label]:
            base_url = 'https://www.gushiwen.cn' + category['href']
            for page in range(1, 20):
                url = base_url if page == 1 else base_url + '&page=' + str(page)
                results = gushiwen.get_poetry_by_search(url)
                if not results:
                    print(f'{url}没有数据')
                    break
                print(len(results), results)
                for anchor in results:
                    poem_url = 'https://www.gushiwen.cn' + anchor['href']
                    print(poem_url, anchor.text)
                    # A seed record could be stored here for database upkeep.
                    record = gushiwen.get_poetry_content(poem_url)
                    if not record:
                        continue
                    # Pinyin list with numeric tones, keyed by poem id.
                    pinyin = gushiwen.get_poetry_content_pinyin(
                        params={"id": record['poetry_id'], "value": "yin"})
                    if pinyin:
                        record['pinyin_content'] = pinyin
                    # Persist; duplicate _id just logs and moves on.
                    try:
                        gushiwen_poetry_content.insertOne(record)
                    except Exception as e:
                        print(e)
                # A full listing page holds 10 entries; fewer means this
                # was the last page for the current filter.
                if len(results) != 10:
                    break




if __name__ == '__main__':
    # Category-based download.
    main()
    # Search-based download (enable as needed).
    # main2()

