# -*- coding: utf-8 -*-
import os

import scrapy

from miracledata.url import URL, URLError
from miracledata.txdates import TXDates
from miracledata.utils import FileReport

# "rong zi rong juan" (rzrq) = margin trading / securities lending.
# URL template for Sina Finance's daily rzrq page, parameterized by trade date.
urls = URL.from_template(
    "http://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/"\
    "kind/rzrq/index.phtml?tradedate={date}"
)

class MargintxSpider(scrapy.Spider):
    """Scrape Sina Finance margin-trading ("rzrq") data over a date range.

    One page is fetched per trading date.  Each page carries a per-market
    summary table and a per-stock table; each is written out as a separate
    CSV file under ``self.repo``.
    """

    name = "margintx"
    allowed_domains = ["sina.com.cn"]
    start_urls = None  # populated in __init__ from the requested period

    def __init__(self, period, *args, **kwargs):
        """Build the start URLs for every trading date in *period*.

        :param period: trading-date range formatted ``"start:end"``.
        :param repo: optional keyword argument; directory where the CSV
            reports are written (defaults to the current directory).
        :raises ValueError: if *period* is empty/None.
        """
        # raise instead of assert: asserts are stripped under ``python -O``
        if not period:
            raise ValueError("Period dates must be provided.")
        # Always pop 'repo' so it is never forwarded to scrapy.Spider's
        # __init__ (which applies leftover kwargs to the instance and would
        # clobber self.repo when a falsy repo value was passed).
        self.repo = kwargs.pop("repo", None) or "."
        super(MargintxSpider, self).__init__(*args, **kwargs)
        start_date, end_date = period.split(":")
        dates = ({"date": date} for date in TXDates.between(start_date, end_date))
        self.start_urls = urls.create_many(dates)

    def translate(self, word):
        """Map a Chinese table header/cell to its English equivalent.

        Returns ``'--'`` for any word missing from the translation table.
        """
        words = {
            u'市场': 'market',
            u'本日融资余额(元)': 'margin debt',
            u'本日融资买入额(元)': 'margin buying',
            u'本日融资偿还额(元)': 'margin pay',
            u'本日融券余量金额(元)': 'securities loan',
            u'序号': "NO.",
            u'股票代码': 'sid',
            u'股票名称': 'name',
            u'余额(元)': 'margin debt',
            u'买入额(元)': 'margin buying',
            u'偿还额(元)': 'margin pay',
            u'余量金额(元)': 'securities loan amount',
            u'余量(股)': 'securities loan shares',
            u'卖出量(股)': 'securities loan selling shares',
            u'偿还量(股)': 'securities loan paying shares',
            u'融券余额(元)': 'securities loan debt',
            u'沪市': "SH",
            u'深市': 'SZ',
        }
        # simple lookup-with-default; no need for try/except KeyError
        return words.get(word, '--')

    def parse_market_table(self, table, filename):
        """Write the per-market summary table to *filename* as CSV."""
        rows = []
        # parse the header row (<tr class='head'>), dropping blank cells
        tds = table.xpath("./tr[@class='head']/td/text()")
        tds_text = [td.extract().strip() for td in tds]
        tds_take = [w for w in tds_text if len(w) > 0]
        fields = [self.translate(w) for w in tds_take]
        rows.append(','.join(fields))

        # data rows are the <tr> elements without a class attribute
        data_trs = table.xpath("./tr[not(@class)]")
        rows += self.extract_data_trs(data_trs)

        with FileReport(filename, "wb") as f:
            f.write(rows)

    def parse_sids_table(self, table, filename):
        """Write the per-stock (sid) table to *filename* as CSV."""
        rows = []
        trs = table.xpath('./tr')
        # header spans the 2nd and 3rd rows: the first three <th> cells of
        # the 2nd row plus every <td> cell of the 3rd row
        header_trs = trs[1:3]
        headers_zh = [th.extract() for th in header_trs[0].xpath('./th/text()')[0:3]]
        headers_zh += [td.extract() for td in header_trs[1].xpath('./td/text()')]
        headers_en = [self.translate(w) for w in headers_zh if len(w) > 0]
        rows.append(','.join(headers_en))

        # data rows start at the 4th row
        rows += self.extract_data_trs(trs[3:])

        with FileReport(filename, "wb") as f:
            f.write(rows)

    def extract_data_trs(self, data_trs):
        """Convert data-only ``<tr>`` rows into CSV lines.

        Numeric cells like ``1,234,567`` lose their thousands separators so
        they do not break the comma-separated output; non-ASCII cells (which
        make Python 2 ``str()`` raise) are mapped through :meth:`translate`.
        """
        rows = []
        for tr in data_trs:
            tds_data_values = \
                [td.extract() for td in tr.xpath('./td/text()|./td/a/text()')]
            values = []
            for value in tds_data_values:
                try:
                    # strip thousands separators: '1,234' -> '1234'
                    values.append(''.join(str(value).split(',')))
                except UnicodeEncodeError:
                    # non-ASCII cell: look it up in the translation table
                    values.append(self.translate(value))
            rows.append(','.join(values))
        return rows

    def parse(self, response):
        """Dispatch each dataTable on the page to the matching CSV writer."""
        tables = response.xpath("//table[@id='dataTable']")
        # the trade date is the last query-string value in the URL
        date = response.url.split('=')[-1]
        for table in tables:
            trs_num = len(table.xpath("./tr[@class='head']"))
            if trs_num > 5:
                # many 'head' rows: this is the per-stock (sids) table
                filename = "margin.sids.{date}.csv".format(date=date)
                fullname = os.path.join(self.repo, filename)
                self.parse_sids_table(table, fullname)
            else:
                # otherwise it is the per-market summary table
                filename = "margin.market.{date}.csv".format(date=date)
                fullname = os.path.join(self.repo, filename)
                self.parse_market_table(table, fullname)

