# -*- coding: utf-8 -*-
import re
import os
# 3rd party
from lxml import html
import scrapy

from miracledata.url import URL, URLError
try:
    from miracledata.stocks import stocks
except ImportError:
    from miracledata.utils import StocksGetter
    print("read from sina, please wait...")
    StocksGetter.from_sina().get()
    from miracledata.stocks import stocks


# URL template for netease ("163") historical trading data pages.
# Filled per-request with a bare stock id, a year and a season number (1-4).
urls = URL.from_template(
        "http://quotes.money.163.com/trade/lsjysj_{sid}.html?"\
        "year={year}&season={n}"
    )


def parse_url(url, repo):
    """Map a netease history-quotes URL to a local CSV path.

    Parameters
    ----------
    url : str
        A URL of the form produced by the ``urls`` template, e.g.
        ``http://quotes.money.163.com/trade/lsjysj_600000.html?year=2016&season=2``.
    repo : str
        Directory in which the CSV file should be placed.

    Returns
    -------
    str
        ``<repo>/<sid>.<year>Q<season>.csv``

    Raises
    ------
    URLError
        If *url* does not match the expected pattern.
    """
    filename = os.path.join(repo, "{0}.{1}Q{2}.csv")
    # BUG FIX: the pattern must be a raw string — "\d" in a plain literal
    # is an invalid escape sequence (DeprecationWarning in Python 3.6+,
    # an error in later versions).
    pattern = re.compile(r"(http:.*_)(\d+)\.html\?year=(\d+)&season=(\d)")
    match = pattern.match(url)
    if not match:
        raise URLError("Invalid URL: " + url)
    _, sid, year, season = match.groups()
    return filename.format(sid, year, season)


class NeteasySpider(scrapy.Spider):
    """Scrape historical trading data from quotes.money.163.com.

    One request is issued per known stock for the requested period, and
    each response is written out as ``<repo>/<sid>.<year>Q<n>.csv``.
    """
    name = "neteasy"
    start_urls = None

    def __init__(self, period, *args, **kwargs):
        """Build the start URL list for one period.

        Parameters
        ----------
        period : str
            ``"<year>-<season>"``, e.g. ``"2016-2"``.
        repo : str, optional (keyword)
            Target directory for the CSV files; defaults to ``"."``.
        """
        assert period, "please provide period!!"
        # BUG FIX: always pop "repo" from kwargs.  The original only popped
        # a truthy value, so a falsy repo (e.g. "") leaked through to
        # scrapy.Spider.__init__, which would overwrite self.repo.
        self.repo = kwargs.pop("repo", None) or "."
        super(NeteasySpider, self).__init__(*args, **kwargs)
        # Stock symbols carry a 2-char exchange prefix; netease wants the
        # bare numeric id.
        sids = (s[2:] for s in stocks)
        year, n = period.split('-')
        self.start_urls = \
            urls.create_many([{"year": year, "n": n, "sid": sid} for sid in sids])

    def parse(self, response):
        """Parse one history page and dump it as a CSV file.

        Returns False on any scraping error (logged to stdout); the
        best-effort behavior of the original is preserved.
        """
        # parse_url raises URLError on a malformed URL (it never returns a
        # falsy value), so no extra check is needed here.
        fullname = parse_url(response.url, self.repo)
        # Translate the Chinese column headers served by netease.
        maps = {
            u"日期": "date",
            u"开盘价": "open",
            u"最高价": "high",
            u"最低价": "low",
            u"收盘价": "close",
            u"涨跌额": "abschg",
            u"涨跌幅(%)": "chg",
            u"成交量(手)": "volume",
            u"成交金额(万元)": "amount",
            u"振幅(%)": "amplitude",
            u"换手率(%)": "turnover"
        }

        try:
            doc = html.fromstring(response.body)
            heads = doc.xpath('/html/body/div[2]/div[4]/table/thead/tr/th')
            rows = doc.xpath('/html/body/div[2]/div[4]/table/tr')
            headers = [maps[th.text_content()] for th in heads]
            lines = []
            for row in rows:
                values = [td.text_content() for td in row.xpath('./td')]
                # Strip thousands separators so the CSV stays parseable.
                lines.append([''.join(v.split(',')) for v in values])
        except Exception as e:
            # Best-effort: log and skip this stock rather than abort the crawl.
            print(e)
            return False

        # BUG FIX: the file was opened in binary mode ('wb') but written
        # with str objects, which raises TypeError under Python 3.
        # Open in text mode instead.
        with open(fullname, 'w') as f:
            f.write(','.join(headers) + '\n')
            f.write('\n'.join(','.join(line) for line in lines))
