#!/usr/bin/env python
# encoding=utf-8
import json
import re
import sys
import time
import urllib

import requests
from scpy.logger import get_logger
from scpy.xawesome_crawler import AsyncCrawler
from xtls.basecrawler import BaseCrawler
from xtls.codehelper import trytry, timeit
from xtls.util import BeautifulSoup

from config import CrawlerStatus

# Python 2 hack: reload(sys) re-exposes setdefaultencoding (site.py deletes it)
# so implicit str<->unicode conversions use UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding("utf-8")

logger = get_logger(__file__)
# Job-dispatch service: pullJob hands out company names, report collects results.
HOST = 'http://120.26.93.104:56789/'
# Convert a unix timestamp (seconds, as str or number) to 'YYYY-mm-dd HH:MM:SS' in local time.
unix_timestamp_2_time_str = lambda stamp: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(long(stamp)))


class WechatCrawler(BaseCrawler):
    SEARCH_URL = 'http://weixin.sogou.com/weixin?type=2&query={0}'

    def __init__(self, key):
        super(WechatCrawler, self).__init__()
        self.key = key
        self.count = 0
        self.news_list = {}
        self.search_url = WechatCrawler.SEARCH_URL.format(urllib.quote(key.decode('utf-8').encode('gbk')))

    @classmethod
    def parse_news_item(cls, item):
        title = item.find('h4')
        footer = item.find('div', attrs={'class': 's-p'})
        result = {
            'title': title.getText().strip(),
            'url': title.find('a')['href'].strip(),
            'source': footer.find('a', attrs={'id': 'weixin_account'})['title'],
            'time': unix_timestamp_2_time_str(re.findall(ur"\('(\d+?)'\)", footer.find_all('script')[-1].getText())[0]),
            'abstract': item.find('p').getText(),
            'content': item.find('p').getText(),
        }
        if result['url'].startswith('/'):
            result['url'] = 'http://weixin.sogou.com' + result['url']
        return result

    def find_302_location(self, url):
        time.sleep(0.5)
        url = self._request.get(url, allow_redirects=False).headers['Location']
        if not url.startswith('http://mp.weixin'):
            raise RuntimeError('captcha catched')
        return url

    def find_news_list(self):
        content = self.get(self.search_url)
        if u'（不含引号）的搜索结果' in content:
            return CrawlerStatus.NO_RESULT
        if u'您的访问过于频繁，为确认本次访问为正常用户行为，需要您协助验证。' in content:
            return CrawlerStatus.GOT_CAPTCHA
        soup = BeautifulSoup(content)
        results = soup.find_all('div', attrs={'class': 'wx-rb'})
        if not len(results):
            return CrawlerStatus.NO_RESULT
        for item in results:
            with trytry():
                wechat_item = WechatCrawler.parse_news_item(item)
            wechat_item['url'] = self.find_302_location(wechat_item['url'])
            wechat_item['keys'] = [self.key]
            self.news_list[wechat_item['url']] = wechat_item
        self.count = len(self.news_list)
        with trytry():
            self.count = int(soup.find('resnum', attrs={'id': 'scd_num'}).getText().replace(',', '').strip())
        return CrawlerStatus.SUCCESS

    def run(self):
        rst = self.find_news_list()
        if rst == CrawlerStatus.NO_RESULT:
            print 'CRAWLER-NO-RESULT'
            return CrawlerStatus.NO_RESULT
        elif rst == CrawlerStatus.GOT_CAPTCHA:
            print 'CRAWLER-GOT-CAPTCHA'
            return CrawlerStatus.GOT_CAPTCHA

        data = WechatAsyncCrawler(self.news_list)
        data.run()

        return self.news_list.values()


class WechatAsyncCrawler(AsyncCrawler):
    """Fetch each article URL concurrently and fill in the full body text."""

    def __init__(self, news_list, **kwargs):
        # news_list maps article URL -> parsed item dict; the URLs form the crawl queue.
        super(WechatAsyncCrawler, self).__init__(news_list.keys(), **kwargs)
        self.news_list = news_list

    def on_request(self, url):
        # Same desktop-Chrome UA and 10s timeouts for every request.
        return dict(
            user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36',
            connect_timeout=10,
            request_timeout=10,
        )

    def on_response(self, url, html):
        # Extract the article body from the fetched page, if present.
        logger.info('parse %s' % url)
        page = BeautifulSoup(html).find('div', attrs={'id': 'page-content'})
        if not page:
            return
        text = page.getText().strip()
        if text:
            self.news_list[url]['content'] = text


@timeit
def search(key):
    """Search WeChat articles for `key`, retrying up to 3 times.

    Returns a list of article dicts on success, CrawlerStatus.NO_RESULT when
    the query has no hits, or None when every attempt hit a captcha or raised.
    """
    for attempt in xrange(3):
        try:
            data = WechatCrawler(key).run()
        except Exception:
            # Network/parse failure (including the captcha RuntimeError from
            # find_302_location): back off and retry. Was a bare `except:`.
            logger.info('ooooops, captcha!!! sleep 30s.')
            time.sleep(30)
            continue
        if data == CrawlerStatus.GOT_CAPTCHA:
            # BUG FIX: the original fell through to an unconditional
            # `return data` right after sleeping, so the retry loop never
            # retried; its `times == 3` guard was also unreachable because
            # xrange(3) yields 0..2. Now we actually retry.
            logger.info('ooooops, captcha!!! sleep 30s.')
            time.sleep(30)
            continue
        return data
    # All attempts exhausted (callers treat any non-list as failure).
    return None


def main():
    """Poll the job server forever: pull a company name, search WeChat, report results."""
    while True:
        resp = requests.get(HOST + 'pullJob?type=wechat')
        if resp.status_code / 100 != 2:
            # Non-2xx means the queue is empty; back off before polling again.
            logger.info('no more jobs, sleep 10s.')
            time.sleep(10)
            continue
        company_name = unicode(resp.content)
        results = search(company_name)
        # Any non-list result (None / CrawlerStatus) is reported as an empty payload.
        payload = json.dumps(results, ensure_ascii=False) if isinstance(results, list) else ''
        requests.post(HOST + 'report', data={
            'companyName': company_name,
            'type': 'wechat',
            'data': payload,
        })


# Run the polling worker only when executed as a script, not on import.
if __name__ == '__main__':
    main()
