
#encoding: utf-8

import scrapy
from scrapy.http.request.form import FormRequest
from cStringIO import StringIO
from scrapy.utils.project import get_project_settings
import damatuWeb
import datetime
import os
import random
import logging
SETTINGS = get_project_settings()

logger = logging.getLogger('scrapy')
class VMZSpider(scrapy.Spider):
    """Log in to m.vwz.cn and scrape one day's real-time fund-flow records.

    Crawl flow: login landing page (seeds the session cookiejar) -> captcha
    image -> login form POST -> merchant list -> per-merchant flow tables,
    following pagination. Rows whose date equals ``search_day`` are written
    pipe-separated to ``data/<search_day>.txt`` and also yielded as items.
    """

    name = "vmz"
    allowed_domains = ["vwz.cn"]
    download_delay = 1.8
    # Landing page; requesting it first establishes the session cookie.
    url = 'http://m.vwz.cn/index.php/User/proxylogin'
    login_url = 'http://m.vwz.cn/index.php/User/proxyLoginCheck'

    # Browser-like request headers; the mobile site expects this Host and UA.
    headers_dict = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Connection": "keep-alive",
        "Host": "m.vwz.cn",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"
    }

    def __init__(self, search_date='', *args, **kwargs):
        """
        :param search_date: day to scrape as ``YYYY-MM-DD``; raises ValueError
            on a malformed date. Defaults to yesterday when empty.
        """
        # Forward extra arguments so scrapy's base Spider initialisation
        # (name/kwargs handling) still runs.
        super(VMZSpider, self).__init__(*args, **kwargs)
        if search_date:
            self.search_day = search_date
            self._search_day = datetime.datetime.strptime(
                self.search_day, '%Y-%m-%d').date()
        else:
            today = datetime.date.today()
            self._search_day = today - datetime.timedelta(days=1)
            self.search_day = self._search_day.strftime('%Y-%m-%d')
        if not os.path.exists('data'):
            os.mkdir('data')
        filename = 'data/%s.txt' % self.search_day
        # Closed in ``closed()`` when the spider finishes.
        self.file = open(filename, 'w')
        self.dmt = damatuWeb.DamatuApi(
            SETTINGS['DMTUSER'], SETTINGS['DMTPWD'],
            SETTINGS['ID'], SETTINGS['KEY'], SETTINGS['HOST'])
        self.dmt_retry = 5    # remaining captcha-decode attempts
        self.login_retry = 5  # remaining login attempts

    def closed(self, reason):
        # Scrapy invokes this hook when the spider closes; flush and release
        # the output file (it was previously left open for the process life).
        self.file.close()

    def start_requests(self):
        """Request the login page first so the session cookie is set."""
        yield scrapy.Request(
            url=self.url,
            headers=self.headers_dict,
            meta={
                "cookiejar": 1
            },
            callback=self.request_captcha
        )

    def request_captcha(self, response):
        """Fetch the captcha image with the session cookie just obtained."""
        captcha_url = "http://m.vwz.cn/Public/code.php"
        yield scrapy.Request(
            url=captcha_url,
            headers=self.headers_dict,
            meta={
                "cookiejar": response.meta["cookiejar"],
            },
            callback=self.download_captcha
        )

    def download_captcha(self, response):
        """Show the captcha to the operator, read the answer, submit login.

        The damatu service call (commented out below) returns an int error
        code on failure — that is why an int answer triggers a captcha
        refresh instead of a login attempt.
        """
        from PIL import Image
        im = Image.open(StringIO(response.body))
        im.show()
        logger.info(u'请输入验证码, 更新验证码输入no')
        captcha = raw_input()
#         captcha = self.dmt.decode(response.body, 200)
        logger.info(u'验证码[%s]' % captcha)
        if isinstance(captcha, int):
            # Captcha service failed; fetch a fresh image (cache-busting t=).
            self.dmt_retry -= 1
            if self.dmt_retry > 0:
                captcha_url = "http://m.vwz.cn/Public/code.php?t=%s" % random.random()
                yield scrapy.Request(
                    url=captcha_url,
                    headers=self.headers_dict,
                    meta={
                        "cookiejar": response.meta["cookiejar"],
                    },
                    callback=self.download_captcha
                )
            else:
                # Was previously a silent stop; make the give-up visible.
                logger.warning(u'captcha decode retries exhausted, giving up')
        else:
            # TODO(review): credentials are hard-coded; consider moving them
            # into the project settings alongside the damatu credentials.
            account = '21848027@qq.com'
            pwd = '888888'
            yield scrapy.FormRequest(
                url=self.login_url,
                headers=self.headers_dict,
                formdata={'account': account,
                          'pwd': pwd,
                          'check_code': captcha,
                          'btn_login': u'登录'},
                meta={
                    "cookiejar": response.meta["cookiejar"],
                },
                callback=self.after_login
            )

    def after_login(self, response):
        """Verify login; on failure retry with a fresh captcha."""
        url = 'http://m.vwz.cn/fund_mana/realtimeFlow.html'
        logger.debug(response.url)
        # The failure page carries an anchor id="href" that points back in
        # history; treat its absence as a successful login.
        hrefs = response.xpath('id("href")/@href').extract()
        is_login = hrefs[0] if hrefs else ''
        if 'javascript:history.back(-1);' in is_login:
            logger.warning(u'登录失败')
            self.login_retry -= 1
            if self.login_retry > 0:
                captcha_url = "http://m.vwz.cn/Public/code.php?t=%s" % random.random()
                yield scrapy.Request(
                    headers=self.headers_dict,
                    url=captcha_url,
                    meta={
                        "cookiejar": response.meta["cookiejar"],
                    },
                    callback=self.download_captcha
                )
            else:
                logger.warning(u'登录失败次数超出 设定次数')
        else:
            logger.info(u"登录成功！")
            yield scrapy.Request(
                url=url,
                headers=self.headers_dict,
                meta={
                    "cookiejar": response.meta["cookiejar"],
                },
                callback=self.parse_liushui)

    def parse_liushui(self, response):
        """Follow each merchant's flow-detail link from the merchant list."""
        urls = response.xpath("id('main')//a[3]/@href").extract()
        for url in urls:
            yield scrapy.Request(
                url=response.urljoin(url),
                headers=self.headers_dict,
                meta={
                    "cookiejar": response.meta["cookiejar"],
                },
                callback=self.parse_realtimeFlow)

    def parse_date(self, response):
        """POST the date-filter form restricted to ``search_day``.

        NOTE(review): not wired as a callback anywhere in this file —
        presumably kept as an alternative to paging; verify before removing.
        """
        s_member_id = response.xpath(
            "//input[@name='s_member_id']/@value").extract()[0]
        # The site requires these filter fields even when left empty.
        s_trade_type = ""
        s_pay_type = ""
        tuikuan = ""
        s_c_date_min = self.search_day
        s_c_date_max = self.search_day
        __hash__ = response.xpath(
            "//input[@name='__hash__']/@value").extract()[0]
        logger.debug(__hash__)
        yield scrapy.FormRequest(
            url='http://m.vwz.cn/fund_mana/realtimeFlow.html',
            headers=self.headers_dict,
            formdata={'s_member_id': s_member_id,
                      's_trade_type': s_trade_type,
                      's_pay_type': s_pay_type,
                      'tuikuan': tuikuan,
                      's_c_date_min': s_c_date_min,
                      's_c_date_max': s_c_date_max,
                      '__hash__': __hash__},
            meta={
                "cookiejar": response.meta["cookiejar"],
            },
            callback=self.parse_realtimeFlow
        )

    def parse_realtimeFlow(self, response):
        """Yield an item per table row dated ``search_day``.

        Rows appear newest-first, so once a row older than the target day is
        seen the remaining pages cannot match and pagination stops.
        """
        trs = response.xpath("//div[@class='box']//tr")
        go_next = True
        for tr in trs:
            td = tr.xpath('td/text()').extract()
            if not td:
                continue  # header/separator rows have no text cells
            the_date = td[1][:10]
            _date = datetime.datetime.strptime(the_date, '%Y-%m-%d').date()
            if the_date == self.search_day:
                td = map(lambda x: x.encode('utf-8'), td)
                self.file.write('|'.join(td))
                self.file.write('\n')
                yield {u'商家名': td[0],
                       u'交易日期': td[1],
                       u'流水总额': td[2],
                       u'退款金额': td[3],
                       u'实收金额': td[4],
                       u'订单号': td[5],
                       u'交易类型': td[6],
                       u'交易流水号': td[7]
                       }
            elif _date > self._search_day:
                # Newer than the target day: keep scanning this page.
                pass
            else:
                # Older than the target day: stop after this page.
                go_next = False

        if go_next:
            next_page = u'下一页'
            next_url = response.xpath("//a[text()='%s']/@href" % next_page)
            if next_url:
                yield scrapy.Request(
                    url=response.urljoin(next_url.extract()[0]),
                    headers=self.headers_dict,
                    meta={
                        "cookiejar": response.meta["cookiejar"],
                    },
                    callback=self.parse_realtimeFlow)
            

