"""
This module fetches all stock (A-share) IDs from Sina.
Stocks may be added or removed over time, so the stock ID
list needs to be refreshed/synced periodically.
"""
import os
import urllib2
import json
import datetime as dt
from socket import timeout as TimeOutError

BASEDIR = os.path.dirname(os.path.abspath(__file__))

class FileReport(object):
    """
    Buffered line writer: collects an iterable of text lines and writes
    them to a file in batches, flushing periodically.

    Example::

        ds = read_lines(url)
        with FileReport("out.csv") as f:
            f.write(ds)
    """
    def __init__(self, filename, mode="wb"):
        # keep the name around for debugging; open eagerly so that a
        # bad path fails fast, before any lines are generated
        self.filename = filename
        self.f = open(filename, mode)

    def write(self, ds, ending='\n', freq=500):
        """
        Write every line from iterable *ds* to the file.

        parameters
        -----------
        ds: iterable of strings (without trailing newlines)
        ending: line terminator appended after every line
        freq: flush to disk every *freq* lines, so a long-running
              generator does not hold everything in memory

        returns the total number of lines written.
        """
        buffered = []
        total = 0
        for line in ds:
            buffered.append(line)
            total += 1
            if total % freq == 0:
                self.f.write(ending.join(buffered) + ending)
                self.f.flush()
                buffered = []
        # write out the final partial batch (fewer than freq lines);
        # flush it too, so callers see a consistent on-disk state
        if buffered:
            self.f.write(ending.join(buffered) + ending)
            self.f.flush()
        return total

    def from_dicts(self, rows, headers, freq=100):
        """
        Write *rows* (an iterable of dicts) as simple CSV-style lines,
        preceded by one header line built from *headers*.  Missing keys
        are emitted as empty fields.

        returns the total number of lines written, header included.
        """
        def gen():
            yield ",".join(headers)
            for row in rows:
                yield ",".join(str(row.get(h, "")) for h in headers)
        return self.write(gen(), freq=freq)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # always release the handle, even when the with-body raised
        self.f.close()


class StocksGetter(object):
    """
    Fetch all stock (A share) IDs from sina.

    parameters:
    ------------
        date: trading date reported by the remote API (filled lazily
              by before_gen_urls when not given)
        count: total stock count reported by the remote API

    """
    # one API result page holds at most this many stock entries
    _PAGE_SIZE = 40

    _url="http://money.finance.sina.com.cn/d/api/openapi_proxy.php/?"\
        "__s=[[%22hq%22,%22hs_a%22,%22%22,0,{page},40]]&"\
        "callback=FDC_DC.theTableData"

    _headers =  {
        "Accept": "text/html,application/xhtml+xml,application/xml",
        "User-Agent":"Mozilla/5.0"
    }

    @classmethod
    def from_sina(cls):
        """Alternate constructor; date/count are filled lazily on first fetch."""
        return cls()

    def __init__(self, date=None, count=0):
        self.date = date
        self.count = count

    def httpget(self, url):
        """GET *url* with the default headers; return the body, or None on error."""
        try:
            req = urllib2.Request(url)
            for k, v in self._headers.items():
                req.add_header(k, v)
            return urllib2.urlopen(req).read()
        except (urllib2.URLError, TimeOutError):
            # best effort: report and skip an unreachable page instead
            # of aborting the whole run
            print("error, skip %s" % url)
            return None

    def _parse(self, rsp):
        """
        Strip the JSONP wrapper ``FDC_DC.theTableData([...])`` from a raw
        response and return the decoded payload.

        raises ValueError when *rsp* is empty/None (e.g. httpget gave up),
        so callers get a clear error instead of an AttributeError on None.
        """
        if not rsp:
            raise ValueError("empty response, cannot parse JSONP payload")
        # payload sits between '([' and '])'; strip the trailing
        # wrapper characters and newlines before decoding
        return json.loads(rsp.split('([')[1].strip('\n)]'))

    def before_gen_urls(self):
        """Fetch page 1 to learn the trading date and the total stock count."""
        data = self._parse(self.httpget(self._url.format(page=1)))
        self.count = data['count']
        self.date  = data['day']
        return self.date, self.count

    def gen_urls(self):
        """Yield one URL per result page: ceil(count / page size) pages."""
        # ceiling division that works identically on Python 2 and 3
        pages = -(-self.count // self._PAGE_SIZE)
        for page in range(1, pages + 1):
            yield self._url.format(page=page)

    def _group_lines(self, ids, l):
        """Chunk *ids* into indented source lines of at most *l* entries each."""
        return [
            "  " + ",".join(ids[i:i + l]) + ',\n'
            for i in range(0, len(ids), l)
        ]

    def gen_lines(self, l=4, keepnames=True):
        """
        Generate the lines of the ``stocks.py`` module body.

        parameters
        -----------
        l: line size, place how many sids into one line
        keepnames: when True, also persist a sid,name CSV via save_names
        """
        names = ['sid,name']

        date, count = self.before_gen_urls()
        yield "#automatically update"
        yield "#date {date}, count {count}".format(
            date = date, count = count
        )
        yield "stocks = {"

        for url in self.gen_urls():
            print(url)
            data = self._parse(self.httpget(url))
            items = data['items']
            # item layout (from the API): item[0]=sid, item[1]=code, item[2]=name
            ids = [
                '"{0}"'.format(item[0].encode('utf-8')) for item in items
            ]
            # keep stock names and sid mappings for the CSV side file
            names += [
                '%s,%s' % (item[1].encode('utf-8'), item[2].encode('utf-8'))
                for item in items
            ]
            # defaults to place 4 stock ids per line
            yield ''.join(self._group_lines(ids, l))
        # close the dict literal
        yield "}"

        if keepnames:
            self.save_names(names)

    def _backup(self, fullname):
        """Move an existing output file aside, suffixed with today's date."""
        if os.path.exists(fullname):
            rundate = dt.date.today().isoformat()
            os.rename(fullname, fullname + "." + rundate)

    def fetch(self, out="stocks.py", linesize=4):
        """Write the generated stocks module to BASEDIR/out, backing up any old file."""
        fullname = os.path.join(BASEDIR, out)
        self._backup(fullname)
        with FileReport(fullname, "wb") as f:
            f.write(self.gen_lines(linesize))

    def save_names(self, names, out="names.csv"):
        """Persist the sid,name mapping lines to *out*."""
        with FileReport(out, "wb") as f:
            f.write(names)

    def get(self, out="stocks.py"):
        """
        Deprecated: use fetch() instead.  Kept because its exact output
        layout (headers with embedded newlines, "stocks={") differs
        slightly from fetch()'s.
        """
        fullname = os.path.join(BASEDIR, out)
        self._backup(fullname)

        date, count = self.before_gen_urls()

        f = open(fullname, 'wb')
        try:
            f.write("#automatically update\n")
            f.write("#date {0}, count {1}\n".format(date, count))

            f.write("stocks={\n")
            for url in self.gen_urls():
                print(url)
                data = self._parse(self.httpget(url))
                items = data['items']
                ids = [ '"{0}"'.format(item[0].encode('utf-8')) for item in items ]
                # legacy layout: 4 stock ids per line
                f.write(''.join(self._group_lines(ids, 4)))
            f.write('}\n')
        finally:
            # don't leak the handle if a page fails mid-run
            f.close()