# -*- coding: utf-8 -*-
import scrapy
import urllib.request
# from lxml import etree
# import requests

class TbSpider(scrapy.Spider):
    """Crawl a Baidu Tieba forum: list pages -> thread detail pages -> image URLs.

    Each thread becomes one item dict with keys ``title``, ``href`` and
    ``img_list`` (accumulated across all pages of the thread).
    """

    name = 'tb'
    allowed_domains = ['tieba.baidu.com']
    # kw is the URL-encoded forum name; pn is the 0-based post offset.
    start_urls = ['https://tieba.baidu.com/f?kw=%E6%9D%8E%E6%AF%85&pn=0']

    def parse(self, response):
        """Parse a forum list page: yield one detail request per thread,
        then follow the list-page pagination link."""
        # One <li> per thread; the first entry is a header row, so slice it off.
        li_list = response.xpath('//ul[@class="threadlist_bright j_threadlist_bright"]/li')[1:]
        for li in li_list:
            item = {
                "title": li.xpath('.//div[@class="threadlist_title pull_left j_th_tit "]/a/@title').extract_first(),
                "href": li.xpath('.//div[@class="threadlist_title pull_left j_th_tit "]/a/@href').extract_first(),
                "img_list": [],
            }
            if item["href"] is not None:
                # response.urljoin resolves the relative thread link against the
                # current page URL (documented Scrapy API; the original reached
                # for urllib.request.urljoin, an undocumented re-export).
                item["href"] = response.urljoin(item["href"])
                yield scrapy.Request(
                    item["href"],
                    callback=self.parse_detail,
                    meta={"item": item},
                )
        # Follow the list-page "next page" link, if present.
        next_url = response.xpath('//a[text()="下一页>"]/@href').extract_first()
        if next_url is not None:
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse,
            )

    def parse_detail(self, response):
        """Parse one page of a thread: collect image URLs, follow in-thread
        pagination, and yield the finished item on the last page."""
        item = response.meta["item"]
        # Accumulate across pages (extend, not assign) so images from earlier
        # pages of the thread are not overwritten by later ones.
        item["img_list"].extend(
            response.xpath('//div[@class="d_post_content j_d_post_content "]//img[@class="BDE_Image"]/@src').extract()
        )
        next_url = response.xpath('//a[text()="下一页"]/@href').extract_first()
        if next_url is not None:
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse_detail,
                meta={"item": item},
            )
        else:
            # Last page reached: emit the item so pipelines receive it.
            # (The original only printed it, so no items ever left the spider.)
            yield item

"""
注意1：属性的属性值包含：div_list = response.xpath('//div[contains(@class,"i")]')
注意2：item["img_list"]，翻页之后会修改这个键对应的值（第二页会替换第一页img_list）
注意3：
    利用xpath爬取贴吧时返回的列表为空的问题解决方法——不同的浏览器,web服务器发送的页面也会不同,ie浏览器是标准的，所以不管任何服务器发送给ie的页面都是标准的，所以我们在用xpath解释页面的时候用的请求头要用IE的User-Agent
    使用别的用户代理，一大串内容被注释掉了！！！
注意4：图片的网址是有问题的，需要做一些处理（不过2019-12-19不需要）
"""