import json

import scrapy
from myspider.items import MyspiderItem


# scrapy crawl eastworthytest --nolog
class EastworthytestSpider(scrapy.Spider):
    """Spider for stock-forum list pages on guba.eastmoney.com.

    ``parse`` extracts one :class:`MyspiderItem` per post row on a list page,
    then follows the post's link so that ``parse_content`` can fill in the
    post body and yield the post item plus one item per reply on the page.
    Posts carry ``reply == -1``; reply items carry the parent post's ``id``
    in their ``reply`` field.
    """

    name = "eastworthytest"
    allowed_domains = ["guba.eastmoney.com"]
    start_urls = ["https://guba.eastmoney.com/list,fshferbm_2.html"]

    # Monotonically increasing id shared by posts and replies.
    count = 1

    def parse(self, response):
        """Parse a forum list page: one table row per post.

        Yields a ``scrapy.Request`` per post so ``parse_content`` can attach
        the post body and emit the item.  (Previously this request was
        commented out, so items were built but never yielded and
        ``parse_content`` was unreachable.)
        """
        for node in response.xpath('//tr[@class="listitem "]'):
            temp = MyspiderItem()
            temp['id'] = self.count
            temp['code'] = None
            temp['name'] = None
            # .get() returns None instead of raising IndexError when a
            # cell is missing on a malformed row.
            temp['reads'] = node.xpath('./td[1]/div/text()').get()
            temp['review'] = node.xpath('./td[2]/div/text()').get()
            temp['title'] = node.xpath('./td[3]/div/a/text()').get()
            content_url = response.urljoin(
                node.xpath('./td[3]/div/a/@href').get(default='')
            )
            temp['author'] = node.xpath('./td[4]/div/a/text()').get()
            temp['postdate'] = node.xpath('./td[5]/div/text()').get()
            # -1 marks a top-level post; replies store the post id here.
            temp['reply'] = -1

            yield scrapy.Request(
                url=content_url,
                callback=self.parse_content,
                meta={'temp': temp, 'id': self.count},
            )
            self.count += 1

    def parse_content(self, response):
        """Parse a post detail page.

        Fills ``temp['content']`` with all text inside the post body,
        yields the post item, then yields one item per reply on the page.
        """
        temp = response.meta['temp']
        # Concatenate every text fragment inside the post body; guard
        # against a missing body instead of indexing blindly.
        body = response.xpath('//div[@class="newstext "]')
        temp['content'] = ''.join(body[0].css('*::text').getall()) if body else ''
        yield temp

        print('准备解析内标签', 'id号:', response.meta['id'], '评论数：', temp['review'], '标题：', temp['title'])
        node_list = response.xpath('//div[@class="reply_item cl  "]')
        print("单独评论数", len(node_list))
        for node in node_list:
            reply = MyspiderItem()
            reply['id'] = self.count
            reply['code'] = None
            reply['name'] = None
            reply['reads'] = None
            reply['review'] = None
            # Relative XPaths so each lookup stays inside this reply node.
            retitle = node.xpath('.//div[@class="reply_title"]')
            reply['title'] = ''.join(retitle[0].css('*::text').getall()) if retitle else ''
            sub_replies = node.xpath('.//ul[@class="replyListL2"]')
            reply['content'] = ''.join(sub_replies[0].css('*::text').getall()) if sub_replies else ''
            reply['author'] = node.xpath('.//div[@class="item_reuser"]/a/text()').get()
            reply['postdate'] = node.xpath('.//div[@class="publishtime"]/span[1]/text()').get()
            # Link the reply back to its parent post's id.
            reply['reply'] = response.meta['id']
            print('reply================', reply)
            self.count += 1
            yield reply