# -*- coding: utf-8 -*-
import scrapy
import time
from half.items import HalfItem
from pymongo import MongoClient
from scrapy.cmdline import execute
import os,sys
from scrapy_redis.spiders import RedisSpider
import json
import urllib3

# Suppress InsecureRequestWarning noise from unverified-HTTPS requests made via urllib3.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class AsianSpider(RedisSpider):
    """Scrape Asian-handicap odds pages on vip.win007.com.

    Match ids are seeded from the ``qiutan.match_id`` MongoDB collection;
    one request per match is generated against ``AsianOdds_n.aspx`` and each
    response is parsed into a :class:`HalfItem` carrying the odds table rows.
    """
    name = 'asian'
    allowed_domains = ['vip.win007.com']
    # start_urls = ['http://vip.win007.com/']
    redis_key = 'asian:start_urls'
    # NOTE(review): credentials are hard-coded in source control; move the
    # connection URI into Scrapy settings or an environment variable.
    client = MongoClient('mongodb://rain:8800@192.168.101.80/qiutan')
    db = client['qiutan']
    col = db['match_id']

    def mongo_server(self):
        """Yield one dict per stored match with the fields needed to build a request.

        The cursor is opened with ``no_cursor_timeout=True`` so the server will
        NOT reap it on idle timeout; it therefore must be closed explicitly,
        even if iteration stops early — hence the try/finally.
        """
        cursor = self.col.find(no_cursor_timeout=True)
        try:
            for match in cursor:
                yield {
                    'thirdId': match['thirdId'],
                    'matchTime': match['matchTime'],
                    'thirdHomeId': match['thirdHomeId'],
                    'thirdAwayId': match['thirdAwayId'],
                }
        finally:
            cursor.close()

    def format_time_stamp_by_field(self, source):
        """Convert a ``'dd/mm/YYYY HH:MM:SS'`` string to a millisecond epoch int.

        NOTE: ``time.mktime`` interprets the parsed struct in the machine's
        local timezone — presumably source times are local; confirm upstream.
        """
        dt_strip = time.strptime(source, '%d/%m/%Y %H:%M:%S')
        return int(time.mktime(dt_strip) * 1000)

    def start_requests(self):
        """Emit one odds-page request per match from MongoDB, carrying the
        match identifiers through ``meta`` to :meth:`parse`."""
        for item in self.mongo_server():
            url = 'http://vip.win007.com/AsianOdds_n.aspx?id={}&t=1'.format(item['thirdId'])
            yield scrapy.Request(
                url,
                callback=self.parse,
                dont_filter=False,
                meta={
                    'download_timeout': 2,
                    'thirdId': item['thirdId'],
                    'matchTime': item['matchTime'],
                    'thirdHomeId': item['thirdHomeId'],
                    'thirdAwayId': item['thirdAwayId'],
                },
            )

    def parse(self, response):
        """Parse the odds table into a HalfItem; yields nothing when no rows parsed."""
        infos = response.xpath('//div[@id="webmain"]/table[1]//tr')
        item = HalfItem()
        data_lst = []
        # First two rows are headers and last two are footers — skip them.
        for info in infos[2:-2]:
            companyName = info.xpath('./td[1]/text()').extract_first()
            if companyName is None:
                # Separator / malformed row: no bookmaker name in column 1.
                continue
            data_lst.append({
                'companyName': companyName,
                'companyID': info.xpath('./td[2]/span/@companyid').extract_first(),
                # Columns 3-5: opening (Cp) odds/handicap; 9-11: live (Js) odds/handicap.
                'odds1Cp': info.xpath('./td[3]/text()').extract_first(),
                'handicapCp': info.xpath('./td[4]/@goals').extract_first(),
                'odds2Cp': info.xpath('./td[5]/text()').extract_first(),
                'odds1Js': info.xpath('./td[9]/text()').extract_first(),
                'handicapJs': info.xpath('./td[10]/@goals').extract_first(),
                'odds2Js': info.xpath('./td[11]/text()').extract_first(),
            })
        item['thirdMatchId'] = int(response.meta['thirdId'])
        item['matchDate'] = self.format_time_stamp_by_field(response.meta['matchTime'])
        item['matchHomeId'] = int(response.meta['thirdHomeId'])
        item['matchAwayId'] = int(response.meta['thirdAwayId'])
        item['AsianOdds'] = data_lst
        if data_lst:
            yield item
# 'gb18030'
if __name__ == '__main__':
    # Running this module directly is equivalent to `scrapy crawl asian`;
    # make the spider's own directory importable first.
    here = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(here)
    execute(['scrapy', 'crawl', 'asian'])