# encoding:utf-8
'''
Created on 2020-12-23
@author: adog
ref: https://scikit-learn.org/stable/auto_examples/applications/plot_stock_market.html#sphx-glr-auto-examples-applications-plot-stock-market-py
'''
#import apps.adog.corr as CORR
import apps.adog.dataMan as DM
#import apps.adog.corr as CORR
import util.xtool as xtool
import util.tradeDate as TD
import apps.adog.util.dbcom as dbcom
import apps.adog.util.mdbcom as mdbcom
import sys

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import font_manager
import pandas as pd

from sklearn import cluster, covariance, manifold

# CJK-capable font for matplotlib node labels (used by adogCluster);
# this path is the Debian/Ubuntu AR PL UMing font.
# alternative (macOS): /Library/Fonts/Songti.ttc
my_font = font_manager.FontProperties(fname="/usr/share/fonts/truetype/arphic/uming.ttc")#/Library/Fonts/Songti.ttc")
#print(__doc__)

# First available trading date per futures code, used below to filter out
# codes that have no data at the start of a sampled period.
# Generated by:
#SELECT cod,min(date) FROM future.jq_hq_1d where code_type='fin8' group by cod
codStartDate={
            'A':'2005-01-04',
            'AG':'2012-05-10',
            'AL':'2005-01-04',
            'AP':'2017-12-22',
            'AU':'2008-01-09',
            'B':'2005-01-04',
            'BB':'2013-12-06',
            'BC':'2020-11-20',
            'BU':'2016-01-04',
            'C':'2016-01-04',
            'CF':'2016-01-04',
            'CJ':'2019-04-30',
            'CS':'2016-01-04',
            'CU':'2016-01-04',
            'CY':'2017-08-18',
            'EB':'2019-09-26',
            'EG':'2018-12-10',
            'FB':'2016-01-04',
            'FG':'2016-01-04',
            'FU':'2016-01-04',
            'HC':'2016-01-04',
            'I':'2016-01-04',
            'IC':'2016-01-04',
            'IF':'2016-01-04',
            'IH':'2016-01-04',
            'J':'2016-01-04',
            'JD':'2016-01-04',
            'JM':'2016-01-04',
            'JR':'2016-01-04',
            'L':'2016-01-04',
            'LH':'2021-01-11',
            'LR':'2016-01-04',
            'LU':'2020-06-23',
            'M':'2016-01-04',
            'MA':'2016-01-04',
            'NI':'2016-01-04',
            'NR':'2019-08-12',
            'OI':'2016-01-04',
            'P':'2016-01-04',
            'PB':'2016-01-04',
            'PF':'2020-10-13',
            'PG':'2020-03-31',
            'PK':'2021-02-02',
            'PM':'2016-01-04',
            'PP':'2016-01-04',
            'RB':'2016-01-04',
            'RI':'2016-01-04',
            'RM':'2016-01-04',
            'RR':'2019-08-16',
            'RS':'2016-01-04',
            'RU':'2016-01-04',
            'SA':'2019-12-09',
            'SC':'2018-03-26',
            'SF':'2016-01-04',
            'SM':'2016-01-04',
            'SN':'2016-01-04',
            'SP':'2018-11-27',
            'SR':'2016-01-04',
            'SS':'2019-09-25',
            'T':'2016-01-04',
            'TA':'2016-01-04',
            # NOTE(review): 'TC' deliberately commented out -- per the original
            # author's note it appears to be superseded by 'ZC' (listed below).
            #'TC':'2016-01-04', ???????????????? should be ZC
            'TF':'2016-01-04',
            'TS':'2018-08-17',
            'UR':'2019-08-09',
            'V':'2016-01-04',
            'WH':'2016-01-04',
            'WR':'2016-01-04',
            'Y':'2016-01-04',
            'ZC':'2016-01-04',
            'ZN':'2016-01-04',

              }

def adogCluster(variation, isPlot=True):
    """Cluster time series via sparse inverse covariance + affinity propagation.

    Parameters
    ----------
    variation : numpy.ndarray, shape (n_series, n_timesteps)
        One row per instrument; per-day variation values (e.g. close-open
        or chg_cls_r), as produced by ``getVariation``.
    isPlot : bool, default True
        When True, also compute a 2D LLE embedding and draw the cluster
        graph with matplotlib.

    Returns
    -------
    labels : numpy.ndarray, shape (n_series,)
        Cluster index assigned to each series.
    n_labels : int
        Highest cluster index (number of clusters - 1).

    NOTE(review): the plotting branch reads the module-level globals
    ``names`` (node labels) and ``my_font`` -- this function is coupled to
    the script below and cannot be imported standalone when isPlot=True.
    """
    # #########################################################################
    # Learn a graphical structure from the correlations.
    edge_model = covariance.GraphicalLassoCV()

    # Standardize the time series: using correlations rather than covariance
    # is more efficient for structure recovery. The epsilon guards against
    # division by zero for a constant (zero-variance) series.
    X = variation.copy().T
    X /= X.std(axis=0) + 1e-10
    edge_model.fit(X)

    # #########################################################################
    # Cluster using affinity propagation on the estimated covariance.
    # (random_state intentionally not passed, for older-sklearn compatibility.)
    _, labels = cluster.affinity_propagation(edge_model.covariance_)
    n_labels = labels.max()

    if isPlot:
        # #####################################################################
        # Find a low-dimension embedding for visualization: the best position
        # of the nodes on a 2D plane. We use a dense eigen_solver to achieve
        # reproducibility (arpack is initiated with random vectors that we
        # don't control). A large number of neighbors captures the
        # large-scale structure.
        node_position_model = manifold.LocallyLinearEmbedding(
            n_components=2, eigen_solver='dense', n_neighbors=6)
        embedding = node_position_model.fit_transform(X.T).T

        # #####################################################################
        # Visualization
        plt.figure(1, facecolor='w', figsize=(10, 8))
        plt.clf()
        ax = plt.axes([0., 0., 1., 1.])
        plt.axis('off')
        # Configure CJK-capable rendering ONCE, before any text is drawn.
        # (Previously these were set inside the label loop, after the first
        # plt.text call, so the first label could render with the wrong font.)
        plt.rcParams['font.sans-serif'] = ['SimHei']  # display CJK labels
        plt.rcParams['axes.unicode_minus'] = False    # display minus signs

        # Display a graph of the partial correlations (normalized precision).
        partial_correlations = edge_model.precision_.copy()
        d = 1 / np.sqrt(np.diag(partial_correlations))
        partial_correlations *= d
        partial_correlations *= d[:, np.newaxis]
        # Upper triangle only; keep edges with |partial corr| above threshold.
        non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

        # Plot the nodes using the coordinates of our embedding.
        plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
                    cmap=plt.cm.nipy_spectral)

        # Plot the edges: a sequence of (*line0*, *line1*, ...) where
        #   linen = (x0, y0), (x1, y1), ... (xm, ym)
        start_idx, end_idx = np.where(non_zero)
        segments = [[embedding[:, start], embedding[:, stop]]
                    for start, stop in zip(start_idx, end_idx)]
        values = np.abs(partial_correlations[non_zero])
        lc = LineCollection(segments,
                            zorder=0, cmap=plt.cm.hot_r,
                            norm=plt.Normalize(0, 11.7 * values.max()))
        lc.set_array(values)
        lc.set_linewidths(15 * values)
        ax.add_collection(lc)

        # Add a label to each node. The challenge here is that we want to
        # position the labels to avoid overlap with other labels: nudge each
        # label away from its nearest neighbor along each axis.
        for index, (name, label, (x, y)) in enumerate(
                zip(names, labels, embedding.T)):
            dx = x - embedding[0]
            dx[index] = 1  # exclude self from the nearest-neighbor search
            dy = y - embedding[1]
            dy[index] = 1
            this_dx = dx[np.argmin(np.abs(dy))]
            this_dy = dy[np.argmin(np.abs(dx))]
            if this_dx > 0:
                horizontalalignment = 'left'
                x = x + .002
            else:
                horizontalalignment = 'right'
                x = x - .002
            if this_dy > 0:
                verticalalignment = 'bottom'
                y = y + .002
            else:
                verticalalignment = 'top'
                y = y - .002
            plt.text(x, y, name, size=10,
                     horizontalalignment=horizontalalignment,
                     verticalalignment=verticalalignment,
                     fontproperties=my_font,
                     bbox=dict(facecolor='w',
                               edgecolor=plt.cm.nipy_spectral(
                                   label / float(n_labels)),
                               alpha=.6))

        # np.ptp(...) instead of ndarray.ptp() for NumPy 2.0 compatibility
        # (the .ptp() method was removed from ndarray in NumPy 2.0).
        plt.xlim(embedding[0].min() - .15 * np.ptp(embedding[0]),
                 embedding[0].max() + .10 * np.ptp(embedding[0]))
        plt.ylim(embedding[1].min() - .03 * np.ptp(embedding[1]),
                 embedding[1].max() + .03 * np.ptp(embedding[1]))

        plt.show()
    return labels, n_labels

def getVariation(df, field=None):
    """Extract a (n_codes, n_dates) variation matrix from long-format quotes.

    Parameters
    ----------
    df : pandas.DataFrame
        Long-format frame with at least 'date', 'cod' and the requested
        price column(s).
    field : list of str, optional
        ['open','close'] -> intraday variation (close - open);
        ['chg_cls_r']    -> the chg_cls_r column as-is.
        Any other value (including the default) yields None.

    Returns
    -------
    numpy.ndarray or None
        One row per code (columns sorted by code), one column per date;
        codes with any missing date are dropped. None when ``field`` is
        not one of the recognized combinations.
    """
    # Avoid the mutable-default-argument pitfall of the original `field=[]`.
    field = [] if field is None else field

    def _matrix(col):
        # Wide pivot (date x cod) for one column; drop codes with gaps,
        # then transpose so each row is one code's full series.
        return (df.pivot(index='date', columns='cod')[col]
                  .dropna(axis=1, how='any').values.T)

    if field == ['open', 'close']:
        # The daily variations of the quotes are what carry most information.
        return _matrix('close') - _matrix('open')
    if field == ['chg_cls_r']:
        return _matrix('chg_cls_r')
    return None

        
# ---- Script configuration -------------------------------------------------
startDate='2020-07-01'
endDate=xtool.nowDate()  # presumably the current date -- see util.xtool
# Codes excluded from clustering (financial futures -- TODO confirm intent).
obs_exclusive=['IC','IF','IH','T','TF','TS']
# Trading calendar restricted to [startDate, endDate).
tradeDate=TD.tradeDateDf
tradeDate=tradeDate.loc[(tradeDate['tradeDate']>=startDate)&(tradeDate['tradeDate']<endDate)]
tradeDate.reset_index(drop=True, inplace=True)
# Daily quote data provider for the configured window.
dm=DM.DataMan(startDate=startDate,endDate=endDate,freq='1d')
total_index=TD.getTradeDayDiff(startDate,endDate )
# True: sample up to 20 random sub-periods; False: one run over the full window.
isRandomPeriod=False
# Whether adogCluster should draw the cluster graph.
isPlot=True

# Run the clustering once over the full window, or -- when isRandomPeriod --
# over up to 20 randomly sampled sub-periods.
for i in range(20):
    df = dm.data.copy()
    if isRandomPeriod:
        # Sample a random window of 80..299 trading days inside the calendar.
        period = np.random.randint(80, high=300, size=1)[0]
        start_index = np.random.randint(0, high=tradeDate.index.max() - period, size=1)
        end_index = start_index + period
        sDate = tradeDate.loc[start_index, 'tradeDate'].values[0]
        eDate = tradeDate.loc[end_index, 'tradeDate'].values[0]
    else:
        sDate = startDate
        eDate = endDate
        period = TD.getTradeDayDiff(sDate, eDate)

    # Keep only codes already trading at sDate and not explicitly excluded.
    obs = sorted(k for k, v in codStartDate.items()
                 if v <= sDate and k not in obs_exclusive)

    df = df.loc[(df['date'] <= eDate) & (df['date'] >= sDate) & (df['cod'].isin(obs)),
                ['date', 'cod', 'code_cn', 'open', 'close', 'chg_cls_r']]
    df.sort_values(by='cod', ascending=True, inplace=True)
    df.drop_duplicates(['date', 'cod'], keep='last', inplace=True)
    df.reset_index(drop=True, inplace=True)

    # Build parallel arrays of code -> Chinese display name (suffix stripped).
    codName = (df[['cod', 'code_cn']]
               .drop_duplicates(['cod', 'code_cn'], keep='last')
               .sort_values(by='cod', ascending=True).values)
    symbols, names = codName[:, 0], codName[:, 1]
    names = np.array([n.replace('期货指数', '') for n in names])

    # The daily variations of the quotes are what carry most information.
    variation = getVariation(df, field=['chg_cls_r'])

    try:
        # FIX: honor the module-level isPlot flag (previously the flag was
        # defined above but never passed, so the plot was always drawn).
        labels, n_labels = adogCluster(variation, isPlot=isPlot)
        print('##############################################################################')
        print('round: %s start:%s end:%s  period:%s days' % (i, sDate, eDate, period))
        # FIX: use a distinct loop variable (the original reused `i`,
        # shadowing the round counter).
        for c in range(n_labels + 1):
            print('Cluster %i: %s' % ((c + 1), ', '.join(names[labels == c])))
        print('##############################################################################')
    except Exception as e:
        # Best-effort: one failed round (e.g. GraphicalLasso non-convergence)
        # should not abort the remaining random rounds.
        print(e)

    if not isRandomPeriod:
        break
