import json

import requests
from lxml import etree
class lemmas:
    """Plain container for one Baidu Baike lemma (encyclopedia entry).

    Stores every scraped field verbatim on the instance; performs no
    validation or transformation of the values it is given.
    """

    def __init__(self, title, abstract, infobox, subject, disambi,
                 redirect, curLink, interPic, interLink, exterLink,
                 relateLemma, all_text):
        # Assign each constructor argument to an attribute of the same name.
        for field, value in (
            ("title", title), ("abstract", abstract), ("infobox", infobox),
            ("subject", subject), ("disambi", disambi), ("redirect", redirect),
            ("curLink", curLink), ("interPic", interPic),
            ("interLink", interLink), ("exterLink", exterLink),
            ("relateLemma", relateLemma), ("all_text", all_text),
        ):
            setattr(self, field, value)

def crawl_page(url, title):
    """Fetch one Baidu Baike lemma page and scrape its fields.

    Parameters
    ----------
    url : str
        Full URL of the lemma page.
    title : str
        Title used to reach the page. If the page reports a different
        canonical title, *title* is recorded as the redirect source.

    Returns
    -------
    lemmas
        A populated ``lemmas`` container. (The original version only
        printed the data and returned None; returning the object is a
        backward-compatible addition.)
    """
    headers = {
        # Well-formed UA string — the original literal was mangled by stray
        # spaces ("Mozilla / 5.0(Windows ..."), which some servers reject.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/88.0.4324.150 Safari/537.36 Edg/88.0.705.68"
    }
    # timeout keeps the crawler from hanging forever on a stalled server.
    r = requests.get(url, headers=headers, timeout=10)
    r.encoding = 'utf-8'
    tree = etree.HTML(r.text)

    # title / redirect: the page's canonical title wins; the requested
    # title becomes the redirect source when the two differ.
    title_nodes = tree.xpath("//dd[@class='lemmaWgt-lemmaTitle-title']/h1/text()")
    # Guard the [0] index: fall back to the requested title if the page
    # layout changed and no <h1> was found.
    thistitle = title_nodes[0] if title_nodes else title
    redirect = ""
    if thistitle != title:
        redirect = title
        title = thistitle

    # NOTE(review): title_id — the source database does not use an
    # auto-increment field; perhaps solve with a trigger?

    # abstract: concatenated summary-paragraph text.
    abstract = "".join(tree.xpath("//div[@class='lemma-summary']//text()"))

    # infobox: dt/dd pairs from the basic-info table, serialized as JSON.
    basicInfo = tree.xpath("//div[contains(@class,'basic-info')]/dl/*")
    infobox = {}
    # Stop at len-1 so an odd trailing node cannot raise IndexError on i+1.
    for i in range(0, len(basicInfo) - 1, 2):
        infobox[normalize_text(basicInfo[i])] = normalize_text(basicInfo[i + 1])
    # ensure_ascii=False keeps the CJK text readable instead of \uXXXX escapes.
    infobox = json.dumps(infobox, ensure_ascii=False)

    # subject: no matching field found on the page yet (placeholder).
    subject = "a"

    # disambi: disambiguation subtitle appended to the title when present.
    disambi = tree.xpath("//dd[@class='lemmaWgt-lemmaTitle-title']/h2/text()")
    if disambi:
        disambi = title + ' ' + disambi[0]
    else:
        disambi = title

    curLink = url

    # Fields not scraped yet — kept as empty placeholders so the lemmas
    # container has a consistent shape.
    interPic = []
    interLink = {}
    exterLink = {}
    relateLemma = ""

    # all_text: every paragraph's text, flattened into one string.
    all_text = ''.join(tree.xpath("//div[@class='para']//text()"))

    return lemmas(title, abstract, infobox, subject, disambi, redirect,
                  curLink, interPic, interLink, exterLink, relateLemma,
                  all_text)
def normalize_text(i):
    """Collapse all text beneath element *i* into one cleaned string.

    Joins every text fragment under the node, then strips newline
    characters and non-breaking spaces left over from the page markup.
    """
    joined = "".join(i.xpath(".//text()"))
    for junk in ("\n", "\xa0"):
        joined = joined.replace(junk, "")
    return joined
crawl_page("https://baike.baidu.com/item/%E8%8B%B9%E6%9E%9C/5670",title="苹果")