#########################################################-*- coding: utf-8 -*-
#
# 
# Utils, helpful functions
#
# 
##############################################################################
import re
import os
import sys
import urllib
from datetime import date, datetime, timedelta

# File scope global variable
ERROR = -1

# Only support python 3.x
# Abort early on python 2: urllib.request (imported below) is python-3-only.
if sys.version_info[0] < 3:
    print('\n**************WARNING***************')
    print('* This tool only supports python 3 *')
    print('************************************\n')
    # BUGFIX: use sys.exit() -- the bare exit() builtin is injected by
    # site.py and is not available under "python -S" or in frozen apps.
    sys.exit()
import urllib.request

AVAIL_WEBSITES = ['sina']

# Basic url of stock raw data for SINA
SINA_BASE_URL = 'http://market.finance.sina.com.cn/downxls.php?'
# If the retrieved data starts with the following line, it means no real
# stock data was downloaded
SINA_NOT_DATA = b'<script language="javascript">\r\n'
SINA_DATA_ENCODE = 'GBK'

# sz company full list url
SZ_COMPANY_LIST_URL = 'http://www.szse.cn/szseWeb/FrontController.szse?ACTIONID=8&CATALOGID=1110&TABKEY=tab1&ENCODE=1'
SZ_DATA_ENCODE = 'GBK'

def is_weekend(day):
    """ Check whether *day* (any object with year/month/day attributes)
        falls on a Saturday or Sunday.
    """
    # ISO weekday: Monday == 1 ... Sunday == 7, so >= 6 means weekend.
    return date(day.year, day.month, day.day).isoweekday() >= 6

# Function which downloads stock raw data from a website that provides stock
# exchange raw data.
def get_raw_data(website, year, month, day, stock_code):
    """ Download one trading day of raw data for one stock.

       website    - string type, current only support 'sina'.
       year       - integer type, represented as YYYY.
       month      - integer type, example: 05, 5, 11
       day        - integer type, example: 3, 03, 30
       stock_code - such as sh600000, sz000001

       Returns a list of decoded text lines on success, ERROR otherwise.
    """
    ##########################################################################
    ## 1. check website
    if not is_valid_website(website):
        error('Unsupported website "%s" for raw data downloading' %website)
        return ERROR

    ##########################################################################
    ## 2. check year, month, day

    now = datetime.now()

    # year
    if year < 1990 or year > now.year:
        error('Incorrect year "%d"' %year)
        return ERROR

    # month
    if month < 1 or month > 12:
        error('Incorrect month "%d"' %month)
        return ERROR

    # day -- let the calendar validate it (handles 28/29/30/31-day months,
    # which the old "1 <= day <= 31" test did not)
    try:
        the_day = date(year, month, day)
    except ValueError:
        error('Incorrect day "%d"' %day)
        return ERROR

    if the_day > now.date():
        error('Date is in future "%d.%d.%d"' %(year, month, day))
        return ERROR

    # BUGFIX: is_weekend() takes a single date-like object, not three ints;
    # the check is also done after validation so date() cannot raise here.
    if is_weekend(the_day):
        error('The day %s-%s-%s is a weekend'%(year, month, day))
        return ERROR

    # Convert number into string, with leading-pad zero
    yyyy = '%04d' %year
    mm   = '%02d' %month
    dd   = '%02d' %day

    ## 3. stock_code
    if re.match(r'(sh60|sh30|sz00)[0-9]{4}', stock_code) is None:
        # BUGFIX: one %s placeholder for one argument (was "%s%s" with a
        # single value, which raised TypeError instead of reporting).
        error('Invalid stock "%s"' %stock_code)
        return ERROR

    ##########################################################################
    ## 4. concatenate url string and download data

    url = ''
    not_data = ''
    data_encode = 'GBK'
    if website == 'sina':
        # date string: date=yyyy-mm-dd
        # NOTE: named date_str so it does not shadow the imported date()
        date_str = 'date=' + yyyy + '-' + mm + '-' + dd
        # symbol string: symbol=sh600572
        symbol = 'symbol=' + stock_code
        # final url
        url = SINA_BASE_URL + date_str + '&' + symbol
        not_data = SINA_NOT_DATA
        data_encode = SINA_DATA_ENCODE
    elif website == 'hexun':
        error('Not yet support to download data from hexun')
        return ERROR
    else:
        # unreachable: is_valid_website() already filtered unknown sites
        return ERROR

    # BUGFIX: do not touch raw_file inside the except clause -- if urlopen()
    # raised, raw_file was never bound and close() raised NameError.
    try:
        raw_file = urllib.request.urlopen(url)
    except IOError:
        error('Failed to connect "%s"' %url)
        # BUGFIX: CONNECT_ERROR was never defined anywhere; return ERROR
        return ERROR

    # try/finally guarantees the connection is closed on every exit path
    try:
        # HTTP status code must be 200 if data has been retrived
        if raw_file.getcode() != 200:
            error('"%s" is invalid' %url)
            return ERROR

        # Read all raw data until EOF, byte_data is a list of bytes lines
        byte_data = raw_file.readlines()
        raw_data = [line.decode(data_encode) for line in byte_data]

        # BUGFIX: guard against an empty response before peeking at [0]
        if byte_data and byte_data[0] == not_data:
            error('Non stock data retrived:')
            # BUGFIX: stringify -- error() concatenates, a list would raise
            error(str(raw_data))
            return ERROR

        return raw_data
    finally:
        raw_file.close()

# Get raw data encoding
def get_data_encode(website):
    """ Return the character encoding of the raw data served by *website*,
        or ERROR if the website is not supported.
    """
    if not is_valid_website(website):
        error('Unsupported website "%s"' %website)
        return ERROR

    # Every supported site currently serves GBK; 'GBK' is also the default.
    encode_by_site = {'sina': 'GBK'}
    return encode_by_site.get(website, 'GBK')

def is_valid_website(website):
    """ Check if given website is supported by this tool.
    """
    # Membership test against the module-level whitelist.
    return website in AVAIL_WEBSITES

# Return the root dir of this tool
def get_root_dir():
    """ Return the directory containing the script that started this process.

        BUGFIX: the old "os.getcwd() + os.sep + sys.argv[0]" produced a bogus
        path whenever sys.argv[0] was already absolute; os.path.abspath()
        handles both relative and absolute argv[0] correctly.
    """
    return os.path.dirname(os.path.abspath(sys.argv[0]))

# Store a copy of the latest company list to a file, which will be used to
# decide which records in the database should be updated.
def cache_2_file(filename, info_list):
    """ Write the strings in *info_list* to <root>/cache/<filename>,
        replacing any previous copy.
    """
    path = get_root_dir()
    fullpath = path + os.sep + 'cache'
    # exist_ok avoids the check-then-create race of the old exists()/mkdir()
    os.makedirs(fullpath, exist_ok=True)

    fullname = fullpath + os.sep + filename
    # Mode 'w' truncates an existing file, so the old explicit os.remove()
    # was redundant; the context manager closes the stream even if a
    # write fails (the old code leaked the handle on error).
    with open(fullname, 'w') as stream:
        stream.writelines(info_list)

def is_cache_file_exist(filename):
    """ Tell whether a cached copy named *filename* exists under
        <root>/cache.
    """
    cached = os.sep.join([get_root_dir(), 'cache', filename])
    return os.path.exists(cached)


def get_sh_company():
    """ Download up-to-date company info list from somewhere.

        Returns the list of lines read from the local copy, or ERROR if
        the file is missing.
    """
    # FIXME: for now we only have a local copy with minimal info
    path = get_root_dir()
    sh_info_file = path + os.sep + 'sh.txt'
    if not os.path.exists(sh_info_file):
        print('sh company info file "%s" does not exist'%sh_info_file)
        return ERROR

    # Context manager guarantees the stream is closed even if readlines()
    # raises (the old open/close pair leaked the handle on error).
    with open(sh_info_file, 'r') as stream:
        return stream.readlines()


def get_sz_company():
    """ Download up-to-date company info list from szse.cn.

        Returns a list of '|'-separated company strings on success,
        ERROR otherwise.
    """
    print('getting data...')
    # BUGFIX: do not touch info_file inside the except clause -- if
    # urlopen() raised, info_file was never bound and close() raised
    # NameError on top of the connection failure.
    try:
        info_file = urllib.request.urlopen(SZ_COMPANY_LIST_URL)
    except IOError:
        error('Failed to connect to www.szse.cn')
        # BUGFIX: CONNECT_ERROR was never defined anywhere; return ERROR
        return ERROR

    # try/finally guarantees the connection is closed on every exit path
    try:
        if info_file.getcode() != 200:
            error('"%s" is invalid' %SZ_COMPANY_LIST_URL)
            return ERROR

        # Read the huge data (expected in 2 lines)
        info_data = info_file.readlines()
    finally:
        info_file.close()

    # BUGFIX: guard before indexing line 2 of the response
    if len(info_data) < 2:
        error('Unexpected response layout from www.szse.cn')
        return ERROR

    # NOTE: <td[#-=:\@\\\'\s\w]*> is not ok!!
    td = re.compile('<td[#-=:\\\@\'\s\w]*>')   # don't use <td.*>
    end_td = re.compile('</td>')
    tr = re.compile('<tr[#-=:\\\@\'\s\w]*>')
    end_tr = re.compile('</tr>')

    # split the 2nd line into thousands of rows by </tr>, then strip the
    # <tr>/<td> markup and turn each cell boundary into a '|' separator
    info_list = end_tr.split(info_data[1].decode(SZ_DATA_ENCODE))
    new_info_list = [tr.sub(' ', end_td.sub('|', td.sub(' ', row)))
                     for row in info_list]

    # remove garbage lines (leading header row, two trailing fragments)
    new_info_list.pop(0)
    new_info_list.pop()
    new_info_list.pop()

    return new_info_list

# Error function, only print out error message
def error(arg):
    """ Print *arg* prefixed with 'ERROR: '.

        Uses %-formatting instead of '+' concatenation so that non-string
        arguments (callers in this file pass lists) do not raise TypeError;
        output for str arguments is unchanged.
    """
    print('ERROR: %s' % (arg,))

def warning(arg):
    """ Print *arg* prefixed with 'WARNING: '. """
    text = 'WARNING: ' + arg
    print(text)

# Show some message
def msg(arg):
    """ Print *arg* prefixed with 'INFO: '. """
    text = 'INFO: ' + arg
    print(text)
