# -*- coding:utf-8 -*-
from datetime import datetime
from datetime import timedelta
import urllib2
from time import time, sleep, ctime
import os
from sys import getsizeof
import socket
import threading
import MySQLdb

"""
	通过getBarHistDateRange接口获取历史某一段时间内的分钟线数据
	保存的文件名为：OMD_startDate_to_endDate
"""

mylock = threading.RLock()  # the lock is needed for using strptime in each thread
# (datetime.strptime is not thread-safe in some CPython 2.x releases)

socket.setdefaulttimeout(180)  # global default socket timeout, in seconds
TIMEOUT = 90  # per-request timeout (seconds) passed to urllib2.urlopen in DataFetch

MINIMUM_OUTPUT_SIZE = 100 * 1024 * 1024  # 100MB -- flush buffered data to disk past this size

# Date range to download; output files are saved under OMD_<startDate>_to_<endDate>/
startDate = '2017-06-29'
endDate = '2017-06-30'
dirname = 'OMD' + '_' + startDate + '_to_' + endDate + '/'


def OneMinData(secID, startDate, endDate, filenm):
    """
    Download one-minute bar data for *secID* between *startDate* and
    *endDate* (both 'YYYY-MM-DD' strings) and write it to *filenm*.

    The date range is split into windows of at most MAX_QUERY_TIME_PERIOD
    days per request; responses are buffered and flushed to disk whenever
    the accumulated payload exceeds MINIMUM_OUTPUT_SIZE bytes.

    Fixes over the previous version:
      * empty responses are checked BEFORE indexing received[0]
        (previously an IndexError whenever DataFetch() returned []);
      * the flush threshold is compared against the actual byte count of
        the buffered lines, not sys.getsizeof() of the outer list (which
        only counts pointer slots and ignores the contents);
      * the CSV header row is written on the first flush (previously the
        'StartDate == OriginDate' test was evaluated after StartDate had
        already been advanced, so it was always False and mid-run flushes
        silently dropped the header).
    """
    MAX_QUERY_TIME_PERIOD = 30  # days per request window

    # strptime is guarded by the module lock: it is not thread-safe in
    # some CPython releases (see comment at the lock's definition).
    mylock.acquire()
    StartDate = datetime.strptime(startDate, '%Y-%m-%d')
    EndDate = datetime.strptime(endDate, '%Y-%m-%d')
    mylock.release()

    buffer_received = []  # downloaded chunks awaiting flush
    buffered_bytes = 0    # actual byte count of the buffered payload

    while StartDate < EndDate:
        # Choose this request's window end and the start of the next one.
        if (EndDate - StartDate) > timedelta(MAX_QUERY_TIME_PERIOD):
            WindowEnd = StartDate + timedelta(MAX_QUERY_TIME_PERIOD)
            NextStart = StartDate + timedelta(MAX_QUERY_TIME_PERIOD + 1)
        else:
            WindowEnd = EndDate
            NextStart = EndDate

        mylock.acquire()
        t1 = datetime.strftime(StartDate, '%Y%m%d')
        t2 = datetime.strftime(WindowEnd, '%Y%m%d')
        mylock.release()

        received = DataFetch(secID, t1, t2)

        # Check emptiness first: DataFetch returns [] after repeated timeouts.
        if received and received[0] != '-1:No Data Returned':
            buffer_received.append(received)
            buffered_bytes += sum(len(line) for line in received)

        StartDate = NextStart

        if buffered_bytes > MINIMUM_OUTPUT_SIZE:
            # Flush once the accumulated data exceeds the threshold.
            _flush_buffer(filenm, buffer_received)
            buffer_received = []
            buffered_bytes = 0

    # Flush whatever remains in the buffer.
    if buffer_received:
        _flush_buffer(filenm, buffer_received)


def _flush_buffer(filenm, buffer_received):
    """Write the buffered chunks to *filenm*.

    Creates the file -- writing the UTF-8 re-encoded CSV header row taken
    from the first chunk -- when it does not exist yet; otherwise appends.
    File_output() closes the handle.
    """
    if not os.path.exists(filenm):
        print("Create a new file")
        f = open(filenm, 'w')
        try:
            column_name = buffer_received[0][0].decode('GB2312').encode('utf-8')
        except Exception:
            # Fall back to the GB18030 superset when GB2312 decoding fails.
            column_name = buffer_received[0][0].decode('GB18030').encode('utf-8')
        f.write(column_name)
    else:
        f = open(filenm, 'a')
    File_output(f, buffer_received)


def File_output(filehandler, buffer_received):
    """Write every buffered chunk to *filehandler* and close the handle.

    Each chunk's first row (the column header) is skipped; the remaining
    lines are transcoded from GB2312 -- falling back to the GB18030
    superset on decode failure -- to UTF-8 before being written.
    """
    for chunk in buffer_received:
        data_rows = chunk[1:]  # row 0 is the column header; skip it
        for raw in data_rows:
            try:
                filehandler.write(raw.decode('GB2312').encode('utf-8'))
            except Exception:
                filehandler.write(raw.decode('GB18030').encode('utf-8'))
    filehandler.close()


def DataFetch(secID, startDate, endDate):
    """
    Fetch minute-bar data for *secID* over [startDate, endDate]
    (both 'YYYYMMDD' strings) from the vendor's CSV endpoint.

    Returns the raw response lines (readlines(); element 0 is the CSV
    header row), or [] after 4 consecutive failed attempts.

    Fixes over the previous version: the bare ``except:`` no longer traps
    KeyboardInterrupt/SystemExit, the unreachable ``else`` clause after the
    retry loop's ``except`` was removed (the ``try`` always returns on
    success), and the Py2-only print statements were parenthesized (the
    single-argument form prints identically under Python 2).
    """
    print("Downloading...")
    print("secID: %s : from %s to %s " % (secID, startDate, endDate))

    url = 'https://api.wmcloud.com:443/data/v1/api/market/getBarHistDateRange.csv?field=&securityID=%s&startDate=%s&endDate=%s' % (
        secID, startDate, endDate,)
    # SECURITY NOTE(review): API token hard-coded in source; consider
    # loading it from configuration or the environment instead.
    token = 'd78873452f92f0dfceb8d452a4346ce7cda541b0c7a68767f9dc808856d23bff'
    req = urllib2.Request(url)  # construct a request instance from url
    req.add_header('Authorization', 'Bearer %s' % token)

    fails = 0
    while fails <= 3:  # up to 4 attempts, same retry budget as before
        try:
            return urllib2.urlopen(req, timeout=TIMEOUT).readlines()
        except Exception:  # typically urllib2.URLError / socket.timeout
            fails += 1
            print("Time out ")
    print("time out")
    return []


def _mapping_index_symbol(stock_code, market):
    if market == 'XSHG':
        if stock_code == '000001':
            return 'SH1A0001'
        if stock_code == '000002':
            return 'SH1A0002'
        if stock_code == '000003':
            return 'SH1A0003'
        if stock_code == '000004':
            return 'SH1B0001'
        if stock_code == '000005':
            return 'SH1B0002'
        if stock_code == '000006':
            return 'SH1B0004'
        if stock_code == '000007':
            return 'SH1B0005'
        if stock_code == '000008':
            return 'SH1B0006'
        if stock_code == '000010':
            return 'SH1B0007'
        if stock_code == '000011':
            return 'SH1B0008'
        if stock_code == '000012':
            return 'SH1B0009'
        if stock_code == '000013':
            return 'SH1B0010'
        if stock_code == '000015':
            return 'SH1B0015'
        if stock_code == '000016':
            return 'SH1B0016'
        if stock_code == '000017':
            return 'SH1B0017'
        return 'SH' + stock_code
    else:
        return 'SZ' + stock_code


def Fetcher(secid_array, num=1):
    """Thread worker: download minute data for every security in
    *secid_array* sequentially, writing one CSV file per symbol under
    the module-level *dirname* directory. *num* labels the thread in
    the final timing message.
    """
    t0 = time()

    # Directory creation is serialized so concurrent workers don't race.
    mylock.acquire()
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
        print("Make a new directory: %s" % dirname)
    mylock.release()

    for security_id in secid_array:
        ticker = security_id[:6]
        exchange = security_id[7:]
        local_symbol = _mapping_index_symbol(ticker, exchange)
        target_file = dirname + local_symbol + '.csv'
        print(local_symbol)
        OneMinData(security_id, startDate, endDate, target_file)

    print('NO.%d part finished in %0.3fs' % (num, time() - t0))


def Secid_Array_Splitter(secid_array, thread_num):
    """Partition *secid_array* into exactly *thread_num* consecutive
    slices for the worker threads.

    Each slice holds len(secid_array) // thread_num + 1 elements except
    possibly the last non-empty one; trailing slices may be empty when
    thread_num does not divide the list evenly.

    Fix: use floor division (``//``) so the slice length stays an int
    under Python 3 as well (``/`` would yield a float there and break
    the slicing; ``//`` is identical to ``/`` for ints on Python 2).
    """
    session_length = len(secid_array) // thread_num + 1
    secid_group = []
    for i in range(thread_num):
        start = i * session_length
        end = start + session_length
        if end < len(secid_array):
            secid_group.append(secid_array[start:end])
        else:
            # Last (or overshooting) slice takes whatever remains.
            secid_group.append(secid_array[start:])
    return secid_group


def get_secid_from_db():
    """Return the list of sec_id values from the stock_info table.

    Fix: the MySQL connection was never closed (leaked on every call);
    it is now released in a ``finally`` block.

    SECURITY NOTE(review): database credentials are hard-coded here --
    move them to environment variables or a config file outside source
    control.
    """
    conn = MySQLdb.connect(host='rdsqtehrv8tqh7v60yvujpublic.mysql.rds.aliyuncs.com',
                           user='tradingfloor',
                           passwd='tradingfloor123',
                           db='tradingreason',
                           charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute("select sec_id from stock_info")
        return [sec_id for (sec_id,) in cur]
    finally:
        conn.close()


################################################################
if __name__ == '__main__':
    init_time = ctime()
    thread_num = 50

    # Partition the full security-id list: one slice per worker thread.
    secid_array = get_secid_from_db()
    secid_group = Secid_Array_Splitter(secid_array, thread_num)

    threads = [
        threading.Thread(target=Fetcher, args=(group, idx + 1))
        for idx, group in enumerate(secid_group)
    ]

    # Daemonize so a stuck download cannot keep the process alive,
    # then start every worker and wait for all of them to finish.
    for worker in threads:
        worker.setDaemon(True)
        worker.start()
    for worker in threads:
        worker.join()

    print("Initiate at %s. Finished at %s" % (init_time, ctime()))
