# coding=utf-8
import time

import traceback
from monthdelta import monthdelta
import datetime
import requests
from pymongo import MongoClient
import Utils
import sys
from pandas import DataFrame
import pandas as pd
# Python 2 hack: force UTF-8 as the default codec so implicit
# str <-> unicode conversions of the Chinese item names below do not raise.
reload(sys)
sys.setdefaultencoding('utf-8')


# NOTE(review): MongoDB host and credentials are hard-coded in source --
# they should be moved to a config file or environment variables.
mgClient = MongoClient('121.40.54.235', 27017,
    username='root2',
    password='Dream2015',
    authSource="stockDataStore",
    authMechanism='SCRAM-SHA-1')
db = mgClient['stockDataStore']

'''
def is_finance_item_value_number(string):
    temp = string.replace(".", "").replace("-", "");
    return temp.isdigit()

##sina的财务报表数据有些是不需要的
def is_finance_item_ignore(name):
    if name == '单位':
        return True
    return False
'''

# Normalization examples (raw sina item name -> canonical name):
# == balance sheet
#   七、其他综合收益 -> 其他综合收益
#   减:库藏股 -> 库藏股
# == profit statement
#   减：营业外支出 -> 营业外支出
#   基本每股收益(元/股) -> 基本每股收益
# == cash-flow statement
#   其中：同业存款 -> 同业存款
#   减：所得税费用 -> 所得税费用
#   递延收益增加（减：减少） -> 递延收益增加
#   已完工尚未结算款的减少(减:增加) -> 已完工尚未结算款的减少
#   已结算尚未完工款的增加(减:减少) -> 已结算尚未完工款的增加
#
def strip_finance_item_name(name, stock_code, type):
    """Normalize a raw financial-statement item name to its canonical form.

    Maps known aliases (different statements/years name the same item
    differently) to one canonical name, then strips leading list markers
    ('一、' .. '九、', '其中：', '减：', '加：') and unit/direction suffixes.

    :param name: raw item name (unicode or utf-8 str)
    :param stock_code: stock code; currently unused, kept for the interface
    :param type: statement type, e.g. 'profit_statement'; used to
        disambiguate items that share a name across statements
    :return: canonical item name (utf-8 str)
    """
    # Downloaded names arrive as unicode under Python 2; convert to a UTF-8
    # byte string so they compare equal to the str literals below (this file
    # is coding=utf-8). The isinstance guard fixes a latent bug: calling
    # .encode('utf-8') on a str already containing non-ASCII bytes raised
    # UnicodeDecodeError (implicit ascii decode) in Python 2.
    if not isinstance(name, str):
        name = name.encode("utf-8")

    aliases = {
        '归属于母公司股东的权益': '归属于母公司股东权益合计',
        '归属于母公司的股东权益合计': '归属于母公司股东权益合计',
        '归属于母公司所有者权益合计': '归属于母公司股东权益合计',
        '所有者权益合计': '股东权益合计',
        '所有者权益(或股东权益)合计': '股东权益合计',
        '负债和所有者权益(或股东权益)总计': '负债及股东权益总计',
        '归属于母公司的净利润': '归属于母公司股东的净利润',
        '归属于母公司所有者的净利润': '归属于母公司股东的净利润',
        '实收资本(或股本)': '股本',
    }
    if name in aliases:
        return aliases[name]

    # 招商银行 has '少数股东权益' in both the balance sheet and the profit
    # statement; in the profit statement it really means minority-interest
    # income, so rename it to '少数股东损益' there.
    if type == 'profit_statement' and name == '少数股东权益':
        name = '少数股东损益'

    prefixes = ('其中：', '其中:', '减：', '减:', '加：', '加:',
                '一、', '二、', '三、', '四、', '五、', '六、', '七、', '八、', '九、')
    for prefix in prefixes:
        if name.startswith(prefix):
            name = name[len(prefix):]

    # Drop unit / direction suffixes wherever they appear in the name.
    for junk in ('(元/股)', '（减：减少）', '(减:减少)', '(减:增加)'):
        name = name.replace(junk, '')
    return name

'''
def parse_stock_file_content(content, stock_code, type):
    period_data = [];
    names = [];
    lines = content.split('\n');
    ##报告期size
    for i in range(1, len(lines[0].split("\t"))):
        period_data.append({})
    for line in lines:
        if not line:  ##skip空的行
            continue
        eles = line.split("\t")
        if is_finance_item_ignore(eles[0]): #过滤掉不需要保存的
            continue
        for i in range(0, len(eles)):
            if i == 0:
                eles[0] = strip_finance_item_name(eles[0], stock_code, type)
                names.append(eles[0])
            else:
                if eles[0] == 'period' or is_finance_item_value_number(eles[i]) == False:
                    period_data[i - 1][eles[0]] = eles[i].strip()
                else:
                    period_data[i - 1][eles[0]] = float(eles[i].strip())
    return {"period_data": period_data, "item_name": names};
'''

def download_and_save2(stock_code, url, type):
    """Download one financial statement (tab-separated csv from sina) and
    insert one mongo document per reporting period.

    :param stock_code: stock code, e.g. '600036'
    :param url: sina download url for this statement
    :param type: mongo collection name / statement type, one of
        'balance_sheet', 'profit_statement', 'cash_flow'
    :return: number of newly inserted period documents (0 on failure)
    """
    save_count = 0

    def strip_item_name(name):
        # bind stock_code/type so the mapper below stays a one-arg callable
        return strip_finance_item_name(name, stock_code, type)

    try:
        dataFrame = pd.read_csv(url, sep='\t', index_col=0, encoding='gbk')
    except Exception:
        Utils.logger.warning("fail to download, code: " + stock_code + ", type: " + type + ", url:" + url)
        return save_count

    # sina's csv ends with a dirty '19700101' column and a trailing empty column
    dataFrame = dataFrame.drop([u'19700101', dataFrame.columns[-1]], axis=1)
    # the '单位' (unit) row carries no numeric data
    dataFrame = dataFrame.drop([u'单位'], axis=0)
    dataFrame = dataFrame.fillna(0)
    dataFrame = dataFrame.applymap(lambda x: float(x))
    dataFrame.index = map(strip_item_name, dataFrame.index)

    if type == 'profit_statement':
        # sina has no 扣非净利润 (net profit excluding non-recurring items);
        # merge it in from tonghuashun, right after the 净利润 column.
        dataFrame = dataFrame.transpose()
        koufei_series = get_koufei_profit_series_from_tonghuashun(stock_code)
        # Bug fix: the None check must run BEFORE using the series --
        # previously .replace() was called first, raising AttributeError
        # whenever the tonghuashun download failed.
        if koufei_series is None:
            return save_count
        # tonghuashun renders missing values as False; normalize to 0
        koufei_series = koufei_series.replace(False, 0)
        index_of_net_profit = dataFrame.columns.get_loc('净利润')
        dataFrame.insert(index_of_net_profit + 1, unicode('扣非净利润'), koufei_series)
        dataFrame = dataFrame.transpose()

    item_names = list(dataFrame.index)

    # record the item-name schema once per stock
    if not db[type + "_profile"].find_one({"code": stock_code}):
        db[type + "_profile"].insert_one({"code": stock_code, "item_names": item_names})

    for cname in dataFrame.columns:
        period = cname
        series = dataFrame[cname]
        data = series.to_dict()
        data[unicode('code')] = stock_code
        data[unicode('period')] = period

        # insert only periods not already stored (idempotent re-runs)
        if not db[type].find_one({"code": stock_code, unicode("period"): period}):
            db[type].insert_one(data)
            save_count = save_count + 1
        else:
            Utils.logger.debug(
                "finance data (type:" + type + "code:" + stock_code + ") of period(" + period + ") already exists")

    return save_count

'''
因为response_content全部转成了UTF-8，而默认编码（coding=utf-8）是UTF-8，
所以data['period']取到的数据
如果response_content是unicode，则必须用 data[u'period']才可以取值
'''
'''
def download_and_save(stock_code, url, type):
    response = requests.request("get", url);
    response_content = response.content;
    if (not response_content):
        Utils.logger.warning("fail to download, code: " + stock_code + ", type: " + type + ", url:" + url)
        return
    response_content = response_content.decode("GBK").encode("UTF-8")
    stock_content = parse_stock_file_content(response_content, stock_code, type)
    period_data = stock_content['period_data']
    item_names = stock_content['item_name']
    #save to file disk
    Utils.save_finance_data(stock_code, type, response_content)
    for data in period_data:
        period = data['period']
        ##sina 财务报表csv结尾有一个19700101里面的脏数据
        if not period or period == '19700101':
            continue
        data['code'] = stock_code
        if not db[type].find_one({"code": stock_code, "period": period}):
            db[type].insert_one(data)
        else:
            Utils.logger.debug("finance data (type:" + type + "code:" + stock_code + ") of period(" + period + ") already exists" )
        if not db[type + "_profile"].find_one({"code": stock_code}):
            db[type + "_profile"].insert_one({"code": stock_code, "item_names": item_names})

'''

def get_max_period(finance_table_type, code):
    """Return the latest reporting period string (e.g. '20180331') stored
    for *code* in the given collection, or None when no data exists.

    :param finance_table_type: mongo collection name / statement type
    :param code: stock code
    """
    max_period_cursor = db[finance_table_type].aggregate([
        {"$match": {"code": code}},
        {"$group": {"_id": '$code', "max": {"$max": "$period"}}}
    ])
    result = list(max_period_cursor)
    # grouping a single code yields at most one document; renamed the local
    # so it no longer shadows the builtin max()
    if len(result) == 1:
        latest = result[0]
        return latest['max']
    return None

def need_to_get_data(finance_table_type, code):
    """Decide whether the stored statements for *code* are stale and a
    fresh download from sina is needed."""
    latest = get_max_period(finance_table_type, code)
    if latest is None:
        # nothing stored yet -- must fetch
        return True
    # e.g. the latest stored period is 2018-03-31: the following report
    # covers 2018-06-30, so re-fetch only once today is past that date.
    next_period = (datetime.datetime.strptime(latest, '%Y%m%d') + monthdelta(3)).strftime("%Y%m%d")
    today = datetime.datetime.now().strftime("%Y%m%d")
    return today > next_period

# sina has no 扣非净利润 (net profit excl. non-recurring items); fetch it
# from tonghuashun's xls export instead.
def get_koufei_profit_series_from_tonghuashun(stock_code):
    """Return a pandas Series of 扣非净利润 indexed by period ('YYYYMMDD'),
    or None when the download/parse fails.

    :param stock_code: stock code, e.g. '600036'
    """
    file_url = 'http://basic.10jqka.com.cn/api/stock/export.php?type=report&code=' + stock_code + '&export=main'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36'+stock_code+' (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    response = requests.request("get", file_url, headers=headers)
    content = response.content
    path = None
    try:
        path = Utils.save_finance_data(stock_code, 'tmp', content)
        # Bug fix: the HTTP `headers` dict was mistakenly forwarded to
        # pd.read_excel, which has no such parameter.
        xls_file = pd.read_excel(path, index_col=0, skiprows=[0], encoding='utf-8')
        index_of_koufei = list(xls_file.index.values).index('扣非净利润(元)')
        series = xls_file.iloc[index_of_koufei]
        # normalize '2018-03-31' style periods to '20180331'
        series.index = map(lambda period: period.replace("-", ""), series.index)
        Utils.logger.info ("length of koufei series of  " + stock_code + " is " + str(len(series.index)))
        return series
    except Exception as e:
        traceback.print_exc()
        Utils.logger.warning("can not get koufei net profit from tonghuashun of stock_code " + stock_code)
        return None
    finally:
        # Bug fix: remove the temp file even when parsing fails -- it was
        # previously leaked on any exception after save_finance_data.
        if path is not None:
            Utils.remove_file(path)


def collect_stock_finance_data(stock_code):
    """Download and store all three financial statements for one stock,
    skipping any statement whose stored data is already up to date.

    :param stock_code: stock code, e.g. '600036'
    """
    # (collection/type name, sina report name used in the download url)
    statements = [
        ('balance_sheet', 'BalanceSheet'),
        ('profit_statement', 'ProfitStatement'),
        ('cash_flow', 'CashFlow'),
    ]
    for type, report in statements:
        if need_to_get_data(type, stock_code):
            # throttle requests so sina does not block us
            time.sleep(3.5)
            url = ("http://money.finance.sina.com.cn/corp/go.php/vDOWN_" + report +
                   "/displaytype/4/stockid/" + stock_code + "/ctrl/all.phtml")
            save_count = download_and_save2(stock_code, url, type)
            Utils.logger.info("success download and save " + type + " of " + stock_code + " of count:" + str(save_count))
        else:
            Utils.logger.info(type + " is up to date of " + stock_code)


def collect(stocks):
    """Collect finance data for every stock in *stocks*; a failure on one
    stock is logged and does not stop the remaining stocks."""
    # iterate over a snapshot of the list, as the original did
    for stock in stocks[:]:
        try:
            code = stock['code']
            if Utils.bypass_stock(code):
                Utils.logger.info(code + " bypassed ")
                continue
            Utils.logger.info("--->start to collect stock finance data " + stock["name"] + "-" + code)
            collect_stock_finance_data(code)
            Utils.logger.info("--->success to collect stock finance data " + stock["name"] + "-" + code + "\n")
        except Exception:
            traceback.print_exc()
            Utils.logger.error("--->fail to collect stock finance data " + stock["name"] + "-" + stock["code"] + "\n")


def main():
    """Collect finance data for every stock whose code starts with 00, 30
    or 60, one prefix batch at a time."""
    # NOTE(review): these look like the standard A-share code prefixes --
    # confirm the set is intentionally exhaustive.
    for exp in ("^00", "^30", "^60"):
        stocks = Utils.quick_access_cursor(db['stock'].find(
            {"code": {"$regex": exp}},
            projection={"code": 1, "name": 1},
            sort=[("code", 1)]))
        Utils.logger.info("CalcFinanceData.py start to collect stock of exp " + exp + ", count: " + str(len(stocks)))
        collect(stocks)

# Entry point: runs at import/execution time (no __main__ guard -- this
# module is executed directly as a batch job).
#collect([{'code': '601318', 'name': 'zgpa'}])
main()

Utils.logger.info("CalcFinanceData.py exit")
