import requests
import time
import pymysql
import re
from lxml import html
# Compatibility alias: this lxml version exposes etree through lxml.html.
etree=html.etree
# Browser-style User-Agent; gushiwen.cn tends to reject requests without one.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}

# Insert one complete poem record into table ts_yf.
def dosql(s1, s2, s3, s4, s5, s6):
    """Insert a single poem row into MySQL table ``ts_yf``.

    s1: title, s2: author, s3: dynasty, s4: poem body,
    s5: annotation/translation text, s6: appreciation text.
    Missing annotation ('') and failed-appreciation markers ('未登录'/'1')
    are replaced with a placeholder before insertion.
    """
    conn = pymysql.Connect(
        host='localhost',
        port=3306,
        user='root',
        passwd='123456',
        db='dzy',
        charset='utf8'
    )
    try:
        # Normalise missing fields to a visible placeholder.
        if s5 == '':
            s5 += '暂无。'
        if s6 == '未登录' or s6 == '1':
            s6 = '暂无。'
        sql_insert = "INSERT INTO ts_yf VALUES (%s,%s,%s,%s,%s,%s); "
        data = [s1, s2, s3, s4, s5, s6]
        with conn.cursor() as cursor:
            cursor.execute(sql_insert, data)
        conn.commit()
    except Exception as e:
        # Keep scraping even if one insert fails, but say why it failed.
        print("插入失败", e)
    finally:
        # Original code leaked the connection whenever the insert raised.
        conn.close()
def cs(s):
    """Strip scraping noise (brackets, whitespace, boilerplate labels) from text."""
    junk_tokens = (
        '〔', '〕', ' ', '\n', '\r',
        '参考资料：', '译文', '注释',
        "'", ')', '　　', '\u3000', '▲',
    )
    for token in junk_tokens:
        s = s.replace(token, '')
    return s
# Fetch the annotation/translation for one poem page.  The site serves either
# a truncated inline note or a "full" version behind an AJAX endpoint; prefer
# the full version whenever its button is present on the page.
def getzs(url, url1):
    """Return the annotation text for a poem.

    url:  the poem's detail-page URL.
    url1: the site root URL (used to build the AJAX request).
    """
    rz = requests.get(url, headers=headers)
    try:
        rz.encoding = 'utf-8'
        treez = etree.HTML(rz.text)
        # The third content div's id contains digits only when a full
        # translation AJAX endpoint exists for this poem.
        div_id = ''.join(treez.xpath('/html/body/div[2]/div[1]/div[3]/@id'))
        digits = ''.join(ch for ch in div_id if '0' <= ch <= '9')
        if digits:
            onclick = ''.join(treez.xpath(
                '/html/body/div[2]/div[1]/div[3]/div/div[1]/@onclick'))
            # The onclick's second argument is the AJAX id, e.g. 75B9117033181D93.
            fanyi_id = cs(onclick.split(',')[1])
            full_url = url1 + 'nocdn/ajaxfanyi.aspx?id=' + fanyi_id
            rq = requests.get(full_url, headers=headers)
            try:
                rq.encoding = 'utf-8'
                treeq = etree.HTML(rq.text)
                return ''.join(treeq.xpath('/html/body/div[1]/p/text()'))
            finally:
                rq.close()
        # No AJAX version: fall back to the inline (possibly truncated) note.
        return ''.join(treez.xpath(
            "/html/body/div[2]/div[1]/div[3]/div[1]//p/text()"))
    finally:
        # Original code placed close() after the returns, so it never ran.
        rz.close()
# Fetch the "appreciation" (赏析) text for one poem page.  The appreciation
# button can live in any of div[4]..div[7] depending on how many sections
# (translation, notes, ...) the page has, so probe those divs in order.
def getsx(url, url1):
    """Return up to 1500 characters of appreciation text, or None if absent.

    url:  the poem's detail-page URL.
    url1: the site root URL (used to build the AJAX request).
    """
    rs = requests.get(url, headers=headers)
    try:
        rs.encoding = 'utf-8'
        treers = etree.HTML(rs.text)
        # (div index, xpath used to pull text out of the AJAX response).
        # Pages whose button sits in div[4]/div[5] also carry text in <a> tags.
        candidates = (
            (4, '/html/body/div[1]//a/text()|//p/text()'),
            (5, '/html/body/div[1]//a/text()|//p/text()'),
            (6, '/html/body/div[1]//p/text()'),
            (7, '/html/body/div[1]//p/text()'),
        )
        for idx, text_xpath in candidates:
            div_id = ''.join(treers.xpath(
                f'/html/body/div[2]/div[1]/div[{idx}]/@id'))
            if 'shangxi' in div_id:
                return _fetch_shangxi(treers, idx, url1, text_xpath)
        return None  # page has no appreciation section
    finally:
        # Original code only reached close() on the no-match path.
        rs.close()

def _fetch_shangxi(treers, idx, url1, text_xpath):
    """Resolve the AJAX id from div[idx]'s onclick and download the appreciation."""
    onclick = ''.join(treers.xpath(
        f'/html/body/div[2]/div[1]/div[{idx}]/div/div[1]/@onclick'))
    # When the onclick has arguments, the second one is the AJAX id;
    # otherwise the whole attribute (after cleanup) is the id.
    if ',' in onclick:
        onclick = onclick.split(',')[1]
    sx_id = cs(onclick)
    ajax_url = url1 + 'nocdn/ajaxshangxi.aspx?id=' + sx_id
    print(ajax_url)
    res = requests.get(ajax_url, headers=headers)
    try:
        res.encoding = 'utf-8'
        tree = etree.HTML(res.text)
        text = cs(''.join(tree.xpath(text_xpath)))
        return text[0:1500]
    finally:
        res.close()
# Scrape one poem page, persist it, and update the sentiment counters.
def child(url, url1, st1, st2):
    """Scrape a single poem's detail page and store it via ``dosql``.

    url:  the poem's detail-page URL.
    url1: the site root URL.
    st1:  poem title.
    st2:  author name.
    Side effect: adds the annotation's per-character occurrence counts to the
    module-level ``StringCounts`` tally.
    """
    print(st1, end=':')
    respc = requests.get(url, headers=headers)
    try:
        respc.encoding = 'utf-8'
        treec = etree.HTML(respc.text)
        # Dynasty.
        cd = cs(''.join(treec.xpath(
            "/html/body/div[2]/div[1]/div[2]/div[1]/div[2]/p/a[2]/text()")))
        # Poem body: regular pages keep the text directly inside the content
        # div; jueju-style pages wrap each line in <p> instead.
        ct = treec.xpath(
            "/html/body/div[2]/div[1]/div[2]/div[1]/div[2]/div[1]/text()")
        if ct and ct[0] == '\n':  # guard: original crashed on an empty result
            ct = treec.xpath(
                "/html/body/div[2]/div[1]/div[2]/div[1]/div[2]/div[1]//p/text()")
        ct = cs(''.join(ct))
        # Annotation / translation.
        zz = cs(getzs(url, url1))
        # Appreciation; the site throttles AJAX calls, so retry once after a
        # pause when it answers with a "not logged in" page.
        ss = getsx(url, url1)
        if '未登录' in str(ss):
            time.sleep(5)
            ss = getsx(url, url1)
        dosql(st1, st2, cd, ct, zz, ss)
        # Tally how often each sentiment character appears in the annotation.
        for i, ch in enumerate(MyString):
            StringCounts[i] += zz.count(ch)
    finally:
        # Always release the response, even if parsing/storage raised.
        respc.close()
# Store per-character counts in ts_count plus an overall sentiment verdict.
def CountsInput(a, b):
    """Insert character-frequency rows and a sentiment summary into ``ts_count``.

    a: list of 12 sentiment characters (indices 0-4 sad, 5-9 happy, 10-11 neutral).
    b: matching list of occurrence counts.
    A weighted score is computed from the counts (sad squared terms subtract,
    happy squared terms add, neutral terms apply a flat penalty); the final row
    stores the rounded score and whether it reaches ``ComparedThing``.
    """
    conn = pymysql.Connect(
        host='localhost',
        port=3306,
        user='root',
        passwd='123456',
        db='dzy',
        charset='utf8'
    )
    try:
        sql_insert = "INSERT INTO ts_count VALUES (%s,%s); "
        with conn.cursor() as cursor:
            for ch, count in zip(a, b):
                cursor.execute(sql_insert, [ch, count])
            # BUG FIX: the original loops indexed b/Rate with the stale ``i``
            # left over from the insert loop, so the whole score was computed
            # from index 11 only.  Use each loop's own variable.
            answer = 0
            for j in range(5):        # sad characters (negative rates)
                answer -= b[j] * Rate[j] * b[j] * Rate[j]
            answer = answer * SumRate[1]
            for j in range(5, 10):    # happy characters (positive rates)
                answer += b[j] * Rate[j] * b[j] * Rate[j]
            answer = answer * SumRate[2]
            for j in range(10, 12):   # neutral characters: flat penalty
                answer -= b[j] * Rate[j] * 2 * SumRate[0]
            value = '是' if answer >= ComparedThing else '否'
            cursor.execute(sql_insert, [str(round(answer, 4)), value])
        conn.commit()
    except Exception as e:
        print("插入失败", e)
    finally:
        # Original code leaked the connection whenever an insert raised.
        conn.close()

# Tang-poetry index page on gushiwen.cn.
urlts = 'https://so.gushiwen.cn/gushi/tangshi.aspx'
# Site root, used to build per-poem detail and AJAX URLs.
url='https://so.gushiwen.cn/'

# Sentiment lexicon.  Weights reflect each character's perceived emotional
# strength; the neutral : happy : sad weight groups are balanced roughly 1:2:2.
MyString = ['悲','哀',  '讽', '凄', '孤','爱','趣','美', '信','乐','思','惜']#2 neutral, 5 happy, 5 sad characters
Rate =     [-0.2,-0.15,-0.25,-0.2,-0.1,0.35,0.05,0.1,0.25,0.25,0.3,0.7]#per-character sentiment weight
SumRate = [1/6,5/12,5/12]
# Running occurrence counts for each character in MyString (updated by child()).
StringCounts = [0]*12
# Score threshold: (happy rate - sad rate)^2 - 2*(neutral rate); >= 0 means happy.
ComparedThing = 0

resp=requests.get(urlts,headers=headers)
resp.encoding='utf-8'
r=resp.text#HTML source of the Tang-poetry index page
#parse with xpath
tree=etree.HTML(r)
# Each poem entry lives inside a <span>; when scraping a different section,
# adjust this xpath, the j limit below, and the target tables.
spans=tree.xpath('//*[@id="html"]/body/div[2]/div[1]/div[2]/div[7]/span')
#/html/body/div[2]/div[1]/div[2]/div[i]
l=[]
j=0
for i in spans:
    if j == 20:
        # Only the first 20 poems are scraped per run.
        break
    l1=[]
    j1=i.xpath("./a/text()")#poem title
    j1 = ''.join(j1)
    #dosql_name(j1)  # (disabled) insert the title into its own table
    j2=i.xpath("./text()")#author, wrapped in parentheses on the page
    j2=''.join(j2)
    j2=j2.replace("(","")
    j2 = j2.replace(")", "")
    #dosql_author(j2)  # (disabled) insert the author into its own table
    j3=i.xpath("./a/@href")#relative detail-page URL of this poem
    j3 = url+''.join(j3)
    child(j3,url,j1,j2)
    j+=1
    print(j)
    time.sleep(1)  # throttle requests to be polite to the server
CountsInput(MyString,StringCounts)
resp.close()