import json
import os
import pprint
import re
import time

import scrapy
class TiebaSpider(scrapy.Spider):
    """Crawl the thread list of a Baidu Tieba forum via the mobile JSON
    endpoint (getFrsData) and append one CSV row per thread to
    ./ama/csvhandle/tieba.csv, following pagination until total_page.

    NOTE(review): a commented-out ``start_requests`` carrying a live
    BDUSS session cookie was removed here — credentials must never be
    committed in source, even commented out.
    """
    name = 'tieba'
    allowed_domains = ['tieba.baidu.com']
    # Page 1 of the JSON thread-list API; kw=%E5%AD%A6%E6%A0%A1 is the
    # URL-encoded forum name "学校", rn=10 threads per page.
    start_urls = ['https://tieba.baidu.com/mg/f/getFrsData?kw=%E5%AD%A6%E6%A0%A1&rn=10&pn=1&is_good=0&cid=0&sort_type=0&fr=&default_pro=1&only_thread_list=1&eqid=']

    def parseDetail(self, item):
        """Extract one thread record into a CSV row and append it.

        ``item`` is one element of the API's ``data.thread_list`` (a dict).
        Commas and "<br/>" are stripped from free-text fields so the
        hand-rolled one-comma-per-column CSV stays aligned.  A record that
        fails extraction is skipped entirely — the original version wrote
        whatever fields it had collected before the exception, which
        silently produced misaligned CSV lines.
        """
        obj = {}
        try:
            obj["评论时间"] = time.strftime(
                "%Y--%m--%d %H:%M:%S",
                time.localtime(int(item.get("create_time"))))
            obj["标题id"] = str(item.get("tid"))
            agree = item.get("agree") or {}
            agree_num = agree.get("agree_num")
            disagree_num = agree.get("disagree_num")
            # Absent counters become "0" rather than the string "None".
            obj["赞同数"] = "0" if agree_num is None else str(agree_num)
            obj["不赞同数"] = "0" if disagree_num is None else str(disagree_num)
            author = item.get("author") or {}
            obj["作者Id"] = str(author.get("id"))
            obj["作者名字"] = author.get("name").replace(",", "")
            obj["回复数"] = str(item.get("reply_num"))
            obj["评论内容"] = (item.get("rich_abstract")[0].get("text")
                           .replace("<br/>", "").replace(",", ""))
        except Exception:
            # Malformed record: log and skip instead of writing a partial row.
            self.logger.warning("skipping malformed thread record tid=%s",
                                item.get("tid"))
            return

        # Make sure the output directory exists before appending.
        os.makedirs("./ama/csvhandle", exist_ok=True)
        with open("./ama/csvhandle/tieba.csv", "a", encoding='utf-8') as f:
            f.write(",".join(obj.values()))
            f.write("\n")

    def parse(self, response):
        """Write every thread on this page to CSV, then request the next page."""
        # Decode the response once (the original parsed the JSON three times).
        data = json.loads(response.text).get("data")
        page = data.get("page")
        next_page = int(page.get("current_page")) + 1
        total_page = page.get("total_page")
        print("\r\n正在爬虫第%d页数据\r\n" % (next_page - 1))
        for thread in data.get("thread_list"):
            self.parseDetail(thread)
        if next_page <= total_page:
            yield scrapy.Request(
                "https://tieba.baidu.com/mg/f/getFrsData?kw=学校&rn=10&pn=%d&is_good=0&cid=0&sort_type=0&fr=&default_pro=1&only_thread_list=1&eqid=" % next_page,
                callback=self.parse
            )