# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.http import Request
from myproject.items import DoubanGroupDiscussionItem
import os
import time

class DoubanSpider(scrapy.Spider):
    """Crawl Douban's group search for Shenzhen-related groups, walk each
    group's discussion list, and yield ``DoubanGroupDiscussionItem`` records.

    Group and discussion titles containing any blacklisted keyword
    (rental/recruiting/second-hand ads, ...) are skipped.
    """

    name = 'douban'
    # Let these error statuses reach parse() so we can retry them ourselves
    # instead of Scrapy's HttpErrorMiddleware dropping the response.
    handle_httpstatus_list = [404, 501, 502, 503, 403]
    allowed_domains = ['www.douban.com']
    #  allowed_domains = ['shudong.phpcool.cn']
    # Group search for "深圳" (Shenzhen, URL-encoded as %E6%B7%B1%E5%9C%B3).
    start_urls = ['https://www.douban.com/group/search?cat=1019&q=%E6%B7%B1%E5%9C%B3/']
    #  start_urls = ['http://shudong.phpcool.cn/show_ip/']

    # Titles containing any of these substrings are treated as spam/ads and
    # skipped.  Plain substrings (no regex metacharacters), so ``in`` matching
    # is equivalent to the previous re.findall() approach.
    BLOCKED_KEYWORDS = ('租房', '创业', '兼职', '招聘', '求职', '合租', '无中介',
                        '市场', '买房', '房屋', '深圳看戏', '尤克里里', '二手')

    def parse(self, response):
        """Parse one page of group search results.

        Retries throttled/error responses, follows each acceptable group's
        discussion list, and paginates through the remaining search pages.
        """
        # NOTE(review): time.sleep() blocks Scrapy's reactor; DOWNLOAD_DELAY
        # or RetryMiddleware would be the non-blocking alternative.
        if response.status in (403, 302):
            # Probably throttled/blocked: back off, then re-request the same
            # URL (dont_filter so the dupe filter doesn't drop the retry).
            print(response.status)
            time.sleep(3)
            yield Request(url=response.url, callback=self.parse, dont_filter=True)
            return  # BUG FIX: don't also try to scrape the error page body
        if response.status in (404, 501, 502, 503):
            print(response.status)
            time.sleep(2)
            yield Request(url=response.url, callback=self.parse, dont_filter=True)
            return  # BUG FIX: same as above

        for result in response.css('.groups .result'):
            title = result.css('.pic a::attr(title)').extract_first()
            # Skip entries without a title or whose title is blacklisted.
            if not title or self.keyword_filter(title):
                continue
            group_url = result.css('.pic a::attr(href)').extract_first()
            # Jump straight to offset 50 of the group's discussion list.
            group_discussion_url = group_url + "discussion?start=50"
            yield Request(url=group_discussion_url, callback=self.group_discussion)

        next_url = response.css('.paginator  span.next a::attr(href)').extract_first()
        if next_url:
            yield Request(url=next_url, callback=self.parse)

    def group_discussion(self, response):
        """Parse one page of a group's discussion list into items and follow
        the pagination link."""
        group_name = response.css('#g-side-info .title a::text').extract_first()
        # The first <tr> is the table header row; skip it.
        for row in response.css('.article table tr')[1:]:
            discussion_title = row.css('.title a::attr(title)').extract_first()
            if not discussion_title or self.keyword_filter(discussion_title):
                continue
            item = DoubanGroupDiscussionItem()
            item['group_name'] = group_name
            item['discussion_title'] = discussion_title
            item['discussion_url'] = row.css('.title a::attr(href)').extract_first()
            yield item

        next_url = response.css('.paginator  span.next a::attr(href)').extract_first()
        if next_url:
            yield Request(url=next_url, callback=self.group_discussion)

    def keyword_filter(self, txt):
        """Return True if *txt* contains any blacklisted keyword."""
        return any(keyword in txt for keyword in self.BLOCKED_KEYWORDS)

    def get_new_ip(self):
        """Ask the local Tor daemon for a new circuit and restart privoxy.

        Best effort: failures are logged and swallowed so the crawl goes on.
        """
        try:
            print("get new ip from tor...")
            # NOTE(review): this talks to port 9050, but Tor's control port is
            # conventionally 9051 (9050 is the SOCKS port) — confirm against
            # the local torrc.  The controller password is hard-coded here.
            os.system("""(echo authenticate '"password"'; echo signal newnym; echo \
                                           quit) | nc localhost 9050""")
            time.sleep(1)
            p = os.popen('sudo service privoxy restart')
            for line in p.readlines():
                print('ssss=' + line)
        except Exception:
            # Was a bare except, which also swallowed KeyboardInterrupt.
            print("get new ip from tor failed...")

        # Give the new circuit / restarted proxy a moment to come up.
        time.sleep(3)


