# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from ecnomic_info_bug.items import EcnomicInfoBugItem
import re

class EcnomicInfoSpider(scrapy.Spider):
    """Crawl a Baidu Tieba forum: follow every thread link on the index
    page, then collect each thread's title and all reply texts.

    Yields one ``EcnomicInfoBugItem`` per thread with:
      * ``title``           -- list of title strings (usually length 1;
        matched loosely because title nodes carry varying decoration classes)
      * ``all_people_view`` -- list of per-post dicts, each holding the
        post's cleaned text fragments and its inline sub-replies
    """
    name = "ecnomic_info"
    allowed_domains = ["baidu.com"]
    # NOTE(review): the kw= query string is GBK percent-encoded — this is a
    # Python-2-era spider; confirm target interpreter before porting.
    start_urls = (
        'http://tieba.baidu.com/f?kw=%CA%B1%BA%AE%B1%F9&fr=ala0',
    )

    # Strips long whitespace runs and HTML tags from extracted markup.
    # Raw string (r'...') so '\s' is not an invalid escape; compiled once
    # at class level instead of on every parsed page.
    _STRIP_RE = re.compile(r'\s{2,100} |<[^>]+>')

    def parse(self, response):
        """Extract every thread-title link on the forum index page and
        issue one request per unique thread."""
        links = response.xpath("//a[@class='j_th_tit']/@href").extract()
        requests = []
        seen = set()
        for link in links:
            # BUG FIX: the original compared the raw relative href against a
            # list of already-prefixed absolute URLs, so the membership test
            # was always false and duplicates were never filtered. Dedup on
            # the relative href itself.
            if link in seen:
                continue
            seen.add(link)
            requests.append(Request('http://tieba.baidu.com%s' % link,
                                    callback=self.parse_one_title))
        return requests

    def parse_one_title(self, response):
        """Parse one thread page into an ``EcnomicInfoBugItem``."""
        # Title nodes use varying decoration classes, so match by substring.
        title = response.xpath(
            "//*[contains(@class,'core_title_txt')]/@title").extract()
        reply_info_list = []
        # One <div> per post inside the post-list container.
        reply_responses = response.xpath(
            '//div[@id="j_p_postlist"]'
            '/div[contains(@class,"l_post l_post_bright")]')
        for reply_response in reply_responses:
            # Per-user meta (the @data-field JSON) is deliberately NOT
            # collected: only aggregate sentiment is analysed, which keeps
            # the workload down.
            # BUG FIX: the original used absolute '//...' sub-queries here;
            # Scrapy evaluates those against the WHOLE document, so every
            # post received the contents of ALL posts. Relative './/'
            # queries scope the search to this post's node.
            reply_descs = self._clean_fragments(reply_response.xpath(
                './/div[contains(@id,"post_content")]').extract())
            # Inline sub-replies ("lzl" floor comments) under this post.
            reply_descs_for_this = self._clean_fragments(reply_response.xpath(
                './/span[@class="lzl_content_main"]').extract())
            reply_info_list.append({
                'reply_descs': reply_descs,
                'reply_descs_for_this': reply_descs_for_this,
            })
        item = EcnomicInfoBugItem()
        item['title'] = title
        item['all_people_view'] = reply_info_list
        return item

    def _clean_fragments(self, fragments):
        """Strip tags/whitespace from raw HTML fragments, drop empties,
        and deduplicate while preserving order.

        NOTE(review): ``.encode('utf-8')`` followed by a str-pattern ``sub``
        only works on Python 2 (the original did the same); under Python 3
        this raises TypeError — confirm the target interpreter.
        """
        cleaned = []
        for fragment in fragments:
            text = self._STRIP_RE.sub('', fragment.encode('utf-8'))
            # BUG FIX: the original tested the *raw* fragment for membership
            # in the list of *cleaned* strings, so deduplication never
            # triggered. Compare the cleaned text instead.
            if text != '' and text not in cleaned:
                cleaned.append(text)
        return cleaned
