import json
import socketserver
import time
import re
from urllib import parse
import requests
from lxml import etree

class myTCPhandler(socketserver.BaseRequestHandler):
    """TCP request handler that looks up received terms on Baidu Baike.

    Each message received on the socket is treated as a search title; the
    handler crawls the matching Baike page(s) and appends progress messages
    to ``baikelog.txt``.
    """

    # Desktop-browser User-Agent so Baidu serves the normal desktop markup.
    headers = {
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, like Gecko) Chrome / 88.0.4324.150Safari / 537.36Edg / 88.0.705.68"
    }

    def handle(self):
        """Read search titles from the client until the connection closes."""
        while True:
            self.data = self.request.recv(1024).decode('UTF-8', 'ignore').strip()
            if not self.data:
                break
            # --- crawler part
            self.crawl(self.data)
            # --- crawler part

    def crawl(self, title):
        """Resolve *title* on Baidu Baike and crawl the matching page(s).

        Handles three cases: no result (redirect to the error page), a
        polysemant/disambiguation page listing several sub-entries, and a
        direct hit on a single lemma page.
        """
        msg = "\n"
        msg += time.strftime("%Y-%m-%d %H:%M:%S") + " search for: " + title + "\n"
        url = 'https://baike.baidu.com/item/{}?force=1'.format(title)
        # timeout keeps a stalled server from hanging this handler thread.
        r = requests.get(url, headers=self.headers, timeout=10)
        r.encoding = 'utf-8'
        # Baike redirects unknown terms to an error page.
        if re.search(r"error\.html", r.url):
            msg += title + ": no result in baidubaike." + "\n"
            self.baikeLog(msg)
            return
        tree = etree.HTML(r.text)
        isPolysemant = tree.xpath("//div[@class='lemmaWgt-subLemmaListTitle']")
        if isPolysemant:
            # Disambiguation page: log every sub-entry link, then crawl each.
            subLists = tree.xpath("//div[@class='main-content']/ul//a/@href")
            for subList in subLists:
                msg += "crawl: " + subList + "\n"
            self.baikeLog(msg)
            for subList in subLists:
                self.crawl_page(parse.urljoin(url, subList), title)
        else:
            msg += "crawl: " + r.url + "\n"
            self.baikeLog(msg)
            self.crawl_page(url, title)

    def crawl_page(self, url, title):
        """Fetch one Baike lemma page and extract its fields.

        NOTE(review): the extracted fields (abstract, infobox, disambi,
        all_text, ...) are computed but never stored or returned — presumably
        a persistence step (DB insert) is still to be added.
        """
        r = requests.get(url, headers=self.headers, timeout=10)
        r.encoding = 'utf-8'
        tree = etree.HTML(r.text)

        # title, redirect: if the page title differs from the search term,
        # the search term redirected to this lemma.
        titles = tree.xpath("//dd[@class='lemmaWgt-lemmaTitle-title']/h1/text()")
        if not titles:
            # Unrecognised page layout (blocked request or changed markup):
            # log and skip instead of crashing on an empty xpath result.
            self.baikeLog("crawl_page: no title found at " + url + "\n")
            return
        thistitle = titles[0]
        redirect = ""
        if thistitle != title:
            redirect = title
            title = thistitle

        # title_id: the source DB has no auto-increment field; maybe solve
        # it with a trigger?

        # abstract
        abstract = tree.xpath("//div[@class='lemma-summary']//text()")
        abstract = "".join(abstract)

        # infobox: the basic-info box alternates name/value text nodes.
        basicInfo = tree.xpath("//div[contains(@class,'infobox')]/dl/*//text()")
        infobox = {}
        # Stop before a trailing unpaired name so an odd-length node list
        # cannot raise IndexError on basicInfo[i + 1].
        for i in range(0, len(basicInfo) - 1, 2):
            infobox[basicInfo[i]] = basicInfo[i + 1]
        # ensure_ascii=False keeps Chinese text readable in the JSON dump.
        infobox = json.dumps(infobox, ensure_ascii=False)

        # subject: no matching field found

        # disambi: the h2 carries the disambiguating suffix, if any.
        disambi = tree.xpath("//dd[@class='lemmaWgt-lemmaTitle-title']/h2/text()")
        if disambi:
            disambi = title + ' ' + disambi[0]
        else:
            disambi = title

        # curLink
        curLink = url

        # interPic: why is there only one line?

        # interLink

        # exterLink

        # relateLemma

        # all_text
        all_text = tree.xpath("//div[@class='para']//text()")
        all_text = '\n'.join(all_text)

        # Be polite to the server between page fetches.
        time.sleep(0.3)

    def baikeLog(self, msg):
        """Append *msg* to the crawl log file (created on first use)."""
        with open("baikelog.txt", mode="a+", encoding="utf-8") as log:
            log.write(msg)
# Script entry point: guard so importing this module does not start the
# (blocking) server — serve_forever() never returns.
if __name__ == "__main__":
    host = 'localhost'
    port = 9007
    # ThreadingTCPServer handles each client connection in its own thread.
    server = socketserver.ThreadingTCPServer((host, port), myTCPhandler)
    server.serve_forever()
