# -*- coding: utf-8 -*-
import re
import datetime
from scrapy import Request, FormRequest

from spider.items import CreditInfoItem
import time
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from spider.db import Session
from sqlalchemy import desc
from spider.models import CurrListedCorp, PeriodList
import urllib

from urllib import unquote
import requests
import urllib2

class CrrditInfoSpider(CrawlSpider):
    """Spider for listed-company credit ratings (上市公司评级).

    Reads every company name from the ``CurrListedCorp`` table and POSTs
    each one to bgcheck.cn's firm-credit search page, yielding one
    ``CreditInfoItem`` (company name, rating, scrape date) per company.
    """
    name = "credit_info"

    # allowed_domains must hold bare host names, not full URLs: Scrapy's
    # OffsiteMiddleware compares request hosts against these strings, so a
    # URL here would cause every request to be dropped as "offsite".
    allowed_domains = ['www.bgcheck.cn']

    profit_url_pattern = 'http://www.bgcheck.cn/MemberCenter/FirmCredit/Search.html?Keywords={}'

    # ASP.NET postback fields the search form expects.  Loop-invariant,
    # so defined once at class level instead of rebuilt per company.
    _form_data = {
        '__EVENTTARGET': 'AspNetPagerPaging',
        '__VIEWSTATEGENERATOR': 'F7914D64',
        '__EVENTARGUMENT': '4'
    }

    # Script entry point.
    def start_requests(self):
        """Yield one search FormRequest per listed company.

        The DB session is always closed via ``finally``, even though this
        is a generator (close runs when the generator is exhausted or GC'd).
        """
        session = Session()
        try:
            corp_name_rows = session.query(
                CurrListedCorp.corp_name
            ).all()

            for row in corp_name_rows:
                # SQLAlchemy returns unicode; urllib.quote (py2) needs a
                # UTF-8 byte string for non-ASCII company names.
                corp_name = row[0].encode('utf8')

                yield FormRequest(
                    url=self.profit_url_pattern.format(
                        urllib.quote(corp_name)
                    ),
                    formdata=self._form_data,
                    callback=self.parse
                )
        finally:
            session.close()

    def parse(self, response):
        """Extract the first search hit's name and rating from *response*.

        Returns a populated ``CreditInfoItem``, or ``None`` when the page
        has no result rows (unknown company) instead of raising IndexError.
        """
        credit = response.selector.xpath(
            '//*[@id="content1"]/ul[1]/li[1]/span/a[1]/text()'
        ).extract()
        corp_name = response.selector.xpath(
            '//*[@id="content1"]/ul[1]/li[1]/a[1]/em/text()'
        ).extract()

        # Guard: a search with no hits yields empty extract() lists.
        if not credit or not corp_name:
            return None

        item = CreditInfoItem()
        item['corp_name'] = corp_name[0]
        item['credit'] = credit[0]
        # Scrape date, e.g. '20240115'.
        item['time'] = datetime.datetime.now().strftime('%Y%m%d')
        return item


