# -*- coding: utf-8 -*-
# @Date    : 2016-12-16 11:29:46
# @Author  : fancy (fancy@thecover.cn)

import time
import random
from selenium import webdriver
from selenium.common.exceptions import TimeoutException

from scrapy import Request
from scrapy.spider import Spider
from scrapy.selector import Selector

from ..items import init_item
from ..settings import P_path


class Weixin(Spider):
    """Scrape WeChat official-account profiles via Sogou's weixin portal.

    ``start_requests`` picks a few random category/page list URLs, ``parse``
    follows every profile link on a list page, and ``detail_parse`` extracts
    the account's name / id / description / avatar, retrying when Sogou
    serves a captcha wall.  ``parse1``/``get_page`` belong to a disabled
    selenium-based avatar-backfill mode (see the commented-out
    ``start_requests`` below).
    """

    name = 'weixin'
    stop_num = None   # reserved stop threshold; not read anywhere yet
    url = 'http://weixin.sogou.com/pcindex/pc/pc_%s/%s.html'
    ccode = True      # flag for the (currently disabled) captcha back-off
    count = 0         # list pages requested so far, capped at 3

    def start_requests(self):
        """Yield at most 3 list-page requests from shuffled category/page ids."""
        categories = range(9, 20)
        random.shuffle(categories)
        pages = range(1, 20)
        random.shuffle(pages)
        for category in categories:
            # only one (random) page per category is taken
            for page in pages[:1]:
                if self.count >= 3:
                    # fix: the original ``break`` only left the inner loop,
                    # so the outer loop kept iterating uselessly; ``return``
                    # ends the generator once the cap is reached.
                    return
                self.count += 1
                yield Request(self.url % (category, page), callback=self.parse)

    def parse(self, response):
        """Follow every account-profile link found on a list page."""
        for url in response.xpath('//li/div[2]/div/a/@href').extract():
            yield Request(url, callback=self.detail_parse)

    def detail_parse(self, response):
        """Extract one account's profile item; re-request on a captcha page."""
        if ('为了保护你的网络安全' in response.body) and ('请输入验证码' in response.body):
            # Sogou served its captcha wall -- retry the same URL.
            # NOTE(review): this retries without a cap; consider bounding it.
            print('fuck 微信验证码出现')
            return Request(response.url, callback=self.detail_parse,
                           dont_filter=True)
        item = init_item()
        item['name'] = response.xpath(
            '//*[@class="profile_nickname"]/text()').extract()[0].strip()
        item['wxh'] = response.xpath(
            '//*[@class="profile_account"]/text()').extract()[0].strip().split()[1]
        item['desc'] = response.xpath(
            '//*[@class="profile_desc_value"]/@title').extract()[0].strip()
        item['avatar'] = response.xpath(
            '//*[@class="radius_avatar profile_avatar"]/img/@src').extract()[0]
        try:
            item['zhuti'] = response.xpath(
                '//*[@class="profile_desc_value"]/text()').extract()[1]
        except IndexError as e:
            # some profiles have no second desc line -- default to empty
            print(e)
            item['zhuti'] = ''
        return item

    @staticmethod
    def get_connect():
        """Open the local sqlite db and return ``(connection, cursor)``.

        Caller owns the connection and is responsible for closing it.
        """
        import sqlite3
        cnx = sqlite3.connect('weixin.db')
        cur = cnx.cursor()
        return cnx, cur

    # def start_requests(self):     # update gzh's avatar
    #     self.driver = webdriver.PhantomJS(executable_path=P_path)
    #     self.driver.set_page_load_timeout(1)
    #     sql = 'select wxh from gzh where avatar is NULL'
    #     cnx, cur = self.get_connect()
    #     cur.execute(sql)
    #     self.sources = cur.fetchall()
    #     cnx.close()
    #     yield Request('http://www.baidu.com', callback=self.parse1)

    def clean_windows(self):
        """Close every selenium window except the first one."""
        windows = self.driver.window_handles
        if len(windows) <= 1:
            return
        for window in windows[1:]:
            self.driver.switch_to_window(window)
            self.driver.close()
        self.driver.switch_to_window(windows[0])

    def get_page(self, url):
        """Fetch *url* with the selenium driver and return its page source.

        If Sogou's anti-spider page is detected, cookies are wiped and the
        fetch retried (at most twice through the loop).
        """
        self.clean_windows()
        attempts = 2
        while attempts:
            attempts -= 1
            try:
                self.driver.get(url)
            except TimeoutException:
                # one extra attempt after a page-load timeout
                self.driver.get(url)
            page_source = self.driver.page_source
            # banned by sogou: drop cookies, then retry the fetch
            if ('antispider' in self.driver.current_url) or \
                    ('antispider' in page_source):
                self.driver.delete_all_cookies()
                time.sleep(0.5)
                self.driver.get(url)
                time.sleep(0.5)
                # fix: refresh page_source after the retry so that on loop
                # exhaustion we do not return the stale ban page.
                page_source = self.driver.page_source
            else:
                break
        return page_source

    def parse1(self, response):
        """Look up each account's avatar on sogou and yield partial items.

        Requires ``self.sources`` (list of ``(wxh,)`` rows) and
        ``self.driver``, both set up by the commented-out selenium
        ``start_requests`` above.
        """
        url_base = 'http://weixin.sogou.com/weixin?type=1&query=%s&ie=utf8&_sug_=y&_sug_type_='
        for wxh in self.sources:
            try:
                url = url_base % wxh[0]
                try:
                    page_source = self.get_page(url)
                except Exception:
                    # best-effort: skip accounts whose page cannot be fetched
                    continue
                sel = Selector(text=page_source)
                item = init_item()
                item['avatar'] = sel.xpath(
                    '//*[@class="news-list2"]/li[1]/div/div[1]//img/@src').extract()[0]
                item['wxh'] = wxh[0]
                yield item
            except Exception as e:
                print(e)


class Vccoo(Spider):
    """Scrape vccoo.com's leaderboard of WeChat official accounts.

    ``parse`` walks each category box on the leaderboard, building one item
    per listed account and requesting its detail page; ``parse1`` completes
    the item with the account id (``wxh``) and emits it.
    """

    name = 'vccoo'
    start_urls = ['http://www.vccoo.com/leaderboard']

    def parse(self, response):
        """Yield one detail-page request (carrying a partial item) per account."""
        for box in response.xpath('//div[@class="article-wrap"]/div'):
            type_ = box.xpath('div/h3/text()').extract()[0]
            urls = box.xpath('ul[1]/li/div[1]/a/@href').extract()
            names = box.xpath('ul[1]/li/div[1]/a/@title').extract()
            avatars = box.xpath('ul[1]/li/div[1]/a/img/@src').extract()
            qrcodes = box.xpath('.//span[@class="code-img"]/a/img/@src').extract()
            descs = box.xpath('ul[2]/li/a/text()').extract()
            # zip truncates to the shortest list, which yields exactly the
            # same items as the original index loop that skipped each
            # out-of-range index via a broad except -- without using
            # exceptions for control flow.
            for url, name, avatar, qrcode, desc in zip(
                    urls, names, avatars, qrcodes, descs):
                item = init_item()
                item['name'] = name
                item['avatar'] = avatar
                item['qrcode'] = qrcode
                item['type'] = type_
                item['desc'] = desc
                req = Request(url, callback=self.parse1)
                req.meta.update({'item': item})
                yield req

    def parse1(self, response):
        """Attach the account id (wxh) from the detail page and emit the item."""
        item = response.meta['item']
        item['wxh'] = response.xpath(
            '//*[@class="shareBoxOne-wechat"]/i[2]/text()').extract()[0].strip()
        yield item
