# -*- coding: utf-8 -*-
import urllib
import urllib.parse

import requests
import scrapy


class TbSpider(scrapy.Spider):
    """Crawl the Tieba mobile site: walk a forum's thread listing and
    collect every post image URL for each thread.

    Items are plain dicts: {"href": thread URL, "title": thread title,
    "img_list": [image URLs]} — yielded once a thread's last page is reached.
    """

    name = 'tb'
    allowed_domains = ['tieba.baidu.com']
    start_urls = ['http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/m?kw=%E6%9D%8E%E6%AF%85&lp=9001']

    def parse(self, response):
        """Parse a listing page: request each thread's detail page, then
        follow the listing's own pagination."""
        # Each thread entry on the mobile listing page is a div whose
        # class attribute contains "i".
        div_list = response.xpath("//div[contains(@class,'i')]")
        for div in div_list:
            item = {}
            item["href"] = div.xpath("./a/@href").extract_first()
            item["title"] = div.xpath("./a/text()").extract_first()
            item["img_list"] = []
            if item["href"] is not None:
                # urljoin resolves the relative href against the current
                # page URL — more robust than the previous hard-coded
                # base-path string concatenation.
                item["href"] = response.urljoin(item["href"])
                yield scrapy.Request(
                    item["href"],
                    callback=self.parse_detail,
                    meta={"item": item},
                )
        # Listing pagination: follow the "next page" ("下一页") link.
        next_url = response.xpath("//a[text()='下一页']/@href").extract_first()
        if next_url is not None:
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse,
            )

    def parse_detail(self, response):
        """Parse one page of a thread: accumulate image URLs across the
        thread's pages and yield the finished item on the last page."""
        item = response.meta["item"]
        item["img_list"].extend(
            response.xpath("//img[@class='BDE_Image']/@src").extract())
        next_url = response.xpath("//a[text()='下一页']/@href").extract_first()
        if next_url is not None:
            # Thread has more pages: keep accumulating into the same item.
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse_detail,
                meta={"item": item},
            )
        else:
            # The scraped srcs are redirect URLs with the real image URL
            # percent-encoded after "src=" — decode, then strip the wrapper.
            # urllib.parse.unquote is the public stdlib API
            # (requests.utils.unquote was an internal re-export of it).
            item["img_list"] = [
                urllib.parse.unquote(url).split("src=")[-1]
                for url in item["img_list"]
            ]
            # Yield so Scrapy pipelines/exporters receive the item
            # (the original only print()ed it, bypassing the pipeline).
            yield item