# -*- coding: utf-8 -*-

import scrapy
from weibo.items import WeiboItem
import urlparse
import urllib
import pymongo
import time
import json
from scrapy.http import HtmlResponse
import re
import requests


# --- Crawl configuration ---------------------------------------------------
# NOTE(review): real account credentials are hard-coded in source; move them
# to settings / environment variables and rotate this password.
WEIBO_LOGIN_USER = '415582796@qq.com'
WEIBO_LOGIN_PWD = 'she6879735'
# Sina login endpoint. The login constants are not referenced anywhere in
# this file -- presumably consumed by another spider/middleware; TODO confirm.
WEIBO_LOGIN_URL = "https://login.sina.com.cn/signup/signin.php"
WEIBO_LOGIN_DOMAIN = 'passport.weibo.com'
# AJAX endpoint that serves the un-logged-in "load more" feed fragments.
WEIBO_LIST_URL = "http://weibo.com/a/aj/transform/loadingmoreunlogin"
WEIBO_DOMAIN = 'weibo.com'
TYPE_CH = '新浪微博'  # source label ("Sina Weibo"), prefixed onto item 'type'


class Guangzhou(scrapy.Spider):
    name = "guangzhou"
    allowed_domains = [
        WEIBO_LOGIN_DOMAIN,
        WEIBO_DOMAIN,
    ]
    start_urls = [
        WEIBO_LIST_URL+"?ajwvr=6&category=0&page=0&lefnav=0"
    ]


    def __init__(self,cate_id=0,cate_name='热门'):
        self.cate_id = cate_id
        self.cate_name = cate_name

    def parse(self, response):
        str = json.loads(response.body)['data'].encode('utf-8')
        response_new = HtmlResponse(url=response.url,body=str)
        html = response_new.xpath("//div[contains(@class,'UG_list_')]")
        for i in range(len(html)):
            item = WeiboItem()
            item = self.parse_list(html[i])#解析html
            item['created_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            yield item


        mongo_uri = self.settings.get('MONGO_URI')
        mongo_client = pymongo.MongoClient(mongo_uri)
        mongo_db = mongo_client['weibo'].catalog
        cursor = mongo_db.find({'is_send':0})
        update = mongo_db.update({'is_send':0},{'$set':{'is_send':1}},multi =True)
        #发送数据给php
        r_data = {'data':[vo for vo in cursor]}
        if r_data['data'] :
            requrl = "http://localhost/my_sns_opensns/index.php?s=/generate/sinaweibo/accept_weibo"
            r = requests.post(requrl, data=r_data)
            print r_data

        url_info = urlparse.urlparse(response.url)
        url_query = urlparse.parse_qs(url_info.query, True)
        next_page = int(url_query['page'][0]) +1 #下一页的页码

        next_url = self.get_ajax_url(page = next_page,category=self.cate_id)
        print '要开始抓取请求：' + next_url;
        print 'cateid:%s; catename:%s' % (self.cate_id,self.cate_name)
        print "先休息5秒"
        time.sleep(5) #延迟加载
        yield scrapy.Request(url=next_url,callback=self.parse,dont_filter=True)


    def get_ajax_url(self,category=0,page=0):
        """
        组装ajax列表请求
        :param category: 分类id
        :param page:     页码
        :return:
        """

        # url地址
        url ='http://weibo.com/a/aj/transform/loadingmoreunlogin'
        # 参数
        values = {
            'ajwvr':6,
            'category':category,
            'page':page,
            'lefnav':0
        }
        # 进行参数封装
        query = urllib.urlencode(values)
        # 组装完整url
        ajax_url = url+"?"+query
        return ajax_url



    def parse_list(self, tag):
        """
        解析目录列表
        :param response:
        :return:
        """
        content = {}
        content['detail_url'] = tag.xpath(".//@href").extract_first()
        content['mid'] = tag.xpath("./@mid").extract_first()
        if (content['mid'].find(':') > 0 ):
            mid_ary = content['mid'].split(':')
            content['mid'] = mid_ary[1]

        content['text'] = tag.xpath(".//h3[contains(@class,'list_title_')]").extract_first()
        subinfo = tag.xpath(".//div[contains(@class,'subinfo_box clearfix')]")
        content['name'] = subinfo.xpath("./a[2]/span/text()").extract_first()
        content['publish'] = subinfo.xpath("./span[1]/text()").extract_first()
        content['user_url'] = subinfo.xpath("./a[2]/@href").extract_first()

        if (content['user_url'].find(WEIBO_DOMAIN) < 0) :
            content['user_url'] = '//'+WEIBO_DOMAIN+content['user_url']

        #用户uid
        content['user_id'] = urlparse.urlparse(content['user_url']).path
        #头像，保存100*100
        content['avatar'] = subinfo.xpath("./a[1]/span/img/@src").extract_first()
        content['avatar'] = re.sub('\.50/','.100/',content['avatar'])
        content['type'] = TYPE_CH+'-'+self.cate_name
        content['is_send'] = 0

        return content


