# -*- coding: utf-8 -*-
import scrapy
from wb.items import *
import re
class WeiboSpider(scrapy.Spider):
    """Breadth-first crawler over the weibo.cn social graph.

    Starting from a seed list of user IDs, each account yields four requests:
    profile counters (``/profile``), full profile info (``/info``), followees
    (``/follow``) and fans (``/fans``), plus paginated tweets.  Every user ID
    discovered in follow/fan lists is fed back into ``task_set`` so the crawl
    expands outward from the seeds.

    NOTE(review): ``start_urls`` holds bare user IDs, not URLs — that is fine
    here because ``start_requests`` is overridden and builds URLs itself.
    """
    name = "weibo"
    allowed_domains = ["weibo.cn"]
    url = 'http://weibo.cn/'
    # Seed weibo user IDs (crawl entry points).
    start_urls = ['1799468404', '5658208771', '3804964543', '5285316854',
                  '5268395811', '6122777230', '5503864294', '2973825563',
                  '6042041854', '5399305296', '6112967033', '5958250307',
                  '5899993712']
    task_set = set(start_urls)   # IDs waiting to be crawled
    tasked_set = set()           # IDs already crawled

    @staticmethod
    def _first(pattern, text):
        """Return the first capture of *pattern* in *text*, or '' on no match."""
        found = re.findall(pattern, str(text))
        return found[0] if found else ''

    def start_requests(self):
        while self.task_set:
            _id = self.task_set.pop()
            # BUG FIX: the original tested ``_id in self.task_set`` *after* the
            # pop (always False) and raised CloseSpider — an unimported name —
            # which would have aborted the whole crawl.  The intent is simply
            # to skip IDs that were already processed.
            if _id in self.tasked_set:
                continue
            self.tasked_set.add(_id)

            info_item = ProfileItem()

            following_item = FollowingItem()
            following_item["_id"] = _id
            following_item["relationship"] = []

            follower_item = FollowedItem()
            follower_item["_id"] = _id
            follower_item["relationship"] = []

            other_url = self.url + _id
            yield scrapy.Request(other_url + '/profile',
                                 meta={"item": info_item},
                                 callback=self.account_parse, dont_filter=True)
            yield scrapy.Request(other_url + "/follow",
                                 meta={"item": following_item},
                                 callback=self.relationship_parse, dont_filter=True)
            yield scrapy.Request(other_url + "/fans",
                                 meta={"item": follower_item},
                                 callback=self.relationship_parse, dont_filter=True)
            yield scrapy.Request(url="http://weibo.cn/%s/profile?filter=1&page=1" % _id,
                                 callback=self.parse_tweets, dont_filter=True)

    def account_parse(self, response):
        """Parse tweet/following/follower counters from the profile page header.

        Filters out likely "zombie" accounts and chains a request to the
        ``/info`` page for the remaining ones.
        """
        item = response.meta["item"]
        sel = scrapy.selector.Selector(response)
        counts = sel.xpath("/html/body/div[3]/div").extract_first()

        uid = re.findall(r'/(\d+)/profile', response.url)[0]
        # "_0" suffix kept for backward compatibility with the stored ID format.
        item['_id'] = uid + "_0"
        # BUG FIX: the original built this URL from item['_id'] (which carries
        # the "_0" suffix), yielding an invalid URL like /12345_0/info.
        profile_url = self.url + uid + '/info'

        item['tweet_stats'] = self._first(r'微博\[(\d+)\]', counts)
        item['following_stats'] = self._first(r'关注\[(\d+)\]', counts)
        item['follower_stats'] = self._first(r'粉丝\[(\d+)\]', counts)
        self.logger.debug('counts for %s: %s', uid, dict(item))

        # Missing counters parse as '' — treat as 0 instead of crashing int('').
        tweets = int(item['tweet_stats'] or 0)
        following = int(item['following_stats'] or 0)
        followers = int(item['follower_stats'] or 0)
        # Zombie-follower heuristic: few tweets, many followees, few fans.
        # BUG FIX: the original raised CloseSpider (undefined name; and closing
        # the spider would end the whole crawl) — the intent is to skip only
        # this account.
        if tweets < 4500 and following > 1000 and followers < 500:
            self.logger.debug('skipping zombie account %s', uid)
            return
        yield scrapy.Request(profile_url, meta={"item": item},
                             callback=self.profile_parse, dont_filter=True)

    def profile_parse(self, response):
        """Parse the ``/info`` page into the ProfileItem and emit it."""
        item = response.meta['item']
        sel = scrapy.selector.Selector(response)
        info = sel.xpath("/html/body/div[6]").extract_first()
        item["profile_pic"] = sel.xpath("//div[@class='c']/img/@src").extract_first()
        item["nick_name"] = self._first(u'昵称:(.*?)<br>', info)
        item["sex"] = self._first(u'性别:(.*?)<br>', info)
        # BUG FIX: the original's existence check for location used a typo
        # pattern ('<br>>') while the extractor used '<br>', so location was
        # silently '' for virtually every profile.
        item["location"] = self._first(u'地区:(.*?)<br>', info)
        item["birthday"] = self._first(u'生日:(.*?)<br>', info)
        item["bio"] = self._first(u'简介:(.*?)<br>', info)
        yield item

    def relationship_parse(self, response):
        """Collect followee/fan user IDs, enqueue unseen ones, follow pagination.

        The accumulated ID list rides along in ``meta['item']`` across pages and
        is only yielded once the last page is reached.
        """
        item = response.meta['item']
        sel = scrapy.selector.Selector(response)
        new_uids = []
        for table in sel.xpath("//table").extract():
            # Profile links come in two shapes: ".../u?uid=123..." or "/123...".
            pattern = r'uid=(\d+)&' if "uid" in table else r'/(\d+)'
            found = re.findall(pattern, table)
            if found:
                # BUG FIX: the original indexed [0] unguarded in the "uid"
                # branch, raising IndexError on a non-matching table.
                new_uids.append(found[0])
            else:
                self.logger.debug('no uid found in table: %s', table)
        item['relationship'].extend(new_uids)
        self.logger.debug('relationships so far: %d', len(item['relationship']))

        # Enqueue IDs we have not crawled yet.
        for uid in new_uids:
            if uid not in self.tasked_set:
                self.task_set.add(uid)
        self.logger.debug('pending id pool: %d', len(self.task_set))

        next_page = sel.xpath(
            "//*[@id='pagelist']/form/div/a[text()='下页']/@href").extract_first()
        if next_page:
            yield scrapy.Request("http://weibo.cn" + next_page, meta={"item": item},
                                 callback=self.relationship_parse, dont_filter=True)
        else:
            yield item

    def parse_tweets(self, response):
        """Parse one page of tweets into TweetsItems and follow pagination."""
        selector = scrapy.selector.Selector(response)
        user_id = re.findall(r'(\d+)/profile', response.url)[0]
        for div in selector.xpath('body/div[@class="c" and @id]'):
            try:
                tweet = TweetsItem()
                raw = div.extract()
                tweet_id = div.xpath('@id').extract_first()        # per-tweet ID
                content = div.xpath('div/span[@class="ctt"]//text()').extract()
                location_links = div.xpath('div/a/@href').extract()  # geo links
                likes = re.findall(r'赞\[(\d+)\]', raw)
                transfers = re.findall(r'转发\[(\d+)\]', raw)
                comments = re.findall(r'评论\[(\d+)\]', raw)
                # Timestamp + posting tool live in the span.ct footer.
                others = div.xpath('div/span[@class="ct"]/text()').extract()

                # "-0-" segment kept for backward compatibility with the
                # original ID format (the counter was always 0).
                tweet["_id"] = user_id + "-0-" + str(tweet_id)
                tweet["ID"] = user_id
                if content:
                    text = " ".join(content)
                    # BUG FIX: str.strip('[位置]') strips any of those four
                    # characters from BOTH ends; we only want to drop a
                    # trailing "[位置]" tag.
                    if text.endswith(u'[位置]'):
                        text = text[:-len(u'[位置]')]
                    tweet["Content"] = text
                if location_links:
                    coords = re.findall(r'center=([\d.,]+)', location_links[0])
                    if coords:
                        tweet["Co_oridinates"] = coords[0]
                if likes:
                    tweet["Like"] = int(likes[0])
                if transfers:
                    tweet["Transfer"] = int(transfers[0])
                if comments:
                    tweet["Comment"] = int(comments[0])
                if others:
                    parts = others[0].split(u'来自')
                    tweet["PubTime"] = parts[0].replace(u"\xa0", "")
                    if len(parts) == 2:
                        tweet["Tools"] = parts[1].replace(u"\xa0", "")
                self.logger.debug('tweet: %s', dict(tweet))
                yield tweet
            except Exception as exc:
                # Best-effort per-tweet parsing: log instead of silently
                # swallowing (the original's bare ``pass`` hid all failures and
                # then printed a possibly-unbound local after the except).
                self.logger.debug('failed to parse tweet div: %s', exc)

        next_url = selector.xpath(
            'body/div[@class="pa" and @id="pagelist"]/form/div'
            '/a[text()="下页"]/@href').extract_first()
        if next_url:
            yield scrapy.Request(url=self.url + next_url,
                                 callback=self.parse_tweets, dont_filter=True)