# encoding:utf-8
'''
@author:  adog
@Email   :
Created on 2021-03-27 16:19:08
'''
#!/usr/local/bin/python
#-*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import json
import dataMan as DM
import apps.adog.util.mdbcom as mdbcom
import util.xtool as xtool

class Pattern():
    """Detect peak/valley "triangle" patterns in smoothed price series.

    Attributes:
        data:    price DataFrame, populated by loadData (None until loaded).
        fi:      future-info DataFrame, populated by loadData.
        smoth_k: default smoothing window size.
        t_k:     default minimum peak spacing.
    """

    def __init__(self, smoth_k=5, t_k=10):
        self.data = None
        self.fi = None
        self.smoth_k = smoth_k
        self.t_k = t_k

    def loadData(self, dataMan=None, cod=None, startDate='2020-01-01', freq='1d'):
        """Load price data from an existing DataMan or by creating a new one.

        :param dataMan: pre-built DM.DataMan; if None a new one is created.
        :param cod: list of instrument codes (defaults to an empty list).
        :param startDate: history start date for a newly created DataMan.
        :param freq: bar frequency for a newly created DataMan.
        """
        # Fix mutable-default-argument pitfall: the old `cod=[]` default was
        # shared across calls; behavior is unchanged for all callers.
        if cod is None:
            cod = []
        if dataMan is None:
            dm = DM.DataMan(startDate=startDate, obs=cod, freq=freq)
        else:
            dm = dataMan
        self.data = dm.data
        self.fi = dm.future_info

    def find_peak_triangleNum(self, filter_signal, peak_width):
        """Count "triangle" peaks in a waveform and locate one index per peak.

        :param filter_signal: smoothed 1-D waveform; must support NumPy fancy
                              indexing (i.e. be an ndarray).
        :param peak_width: local maxima closer than this many samples are
                           merged into one peak. Typically 20 for
                           frequency-domain data, 15 for time-domain triangle
                           detection; too large a value filters out double
                           peaks.
        :return: [peak_count, peak_index] where peak_index holds the index of
                 the highest sample within each merged peak.
        """
        length_data = len(filter_signal)
        # Height threshold: only maxima above 70% of the 95th percentile
        # of the signal are considered real peaks.
        thre = 0.7 * np.percentile(filter_signal, 95)
        # Collect every local maximum (and plateau point) above the threshold.
        l = []
        for i in range(1, length_data - 1):
            if filter_signal[i-1] < filter_signal[i] and filter_signal[i] > filter_signal[i+1] and filter_signal[i] > thre:
                l.append(i)
            elif filter_signal[i] == filter_signal[i-1] and filter_signal[i] > thre:
                # The top of a peak may contain runs of equal samples.
                l.append(i)
        CC = len(l)  # number of raw extrema
        cou = 0
        ll = l[:]
        # Merge extrema that fall inside the same peak (closer than
        # peak_width). `l` is strictly increasing, so the earlier index of
        # each too-close pair is always the one dropped (the original code
        # had an unreachable `else` branch here).
        for j in range(1, CC):
            if l[j] - l[j-1] < peak_width:
                ll[j-1] = 0
                cou = cou + 1
        rcou = CC - cou                   # distinct peak count after merging
        ll = [i for i in ll if i > 0]     # drop the merged-away markers
        peak_index = []
        # Within each merged peak's interval, keep the index of the
        # highest sample.
        for i in range(len(ll)):
            if i == 0:
                index_range = np.array(l)[np.array(l) <= ll[i]]
            else:
                index_range = np.array(l)[(np.array(l) <= ll[i]) & (np.array(l) > ll[i-1])]
            peak_index.append(index_range[np.argmax(filter_signal[index_range], axis=0)])
        return [rcou, peak_index]

    def triangle_flag_meiquan(self, q_flush, n, ratio, max_index):
        """Classify the overall peak shape: right-angled vs isosceles triangles.

        Each interval between consecutive peak maxima is de-noised by
        subtracting `ratio * max(interval)`, then its duty cycle and the
        left/right extent around its maximum are measured; the counts vote
        on the triangle type.

        :param q_flush: waveform; slices must behave like NumPy arrays
                        (boolean masking is used below).
        :param n: number of points in one vibration record (currently unused).
        :param ratio: noise-floor fraction, typically 0.2 or 0.3.
        :param max_index: peak indices, e.g. p[1] from find_peak_triangleNum.
        :return: [flag, duty_cycles]; flag 2 = right-angled triangle,
                 1 = isosceles triangle.
        """
        circle_range = [0]
        zhankongbi = []  # duty cycle per interval
        left = []
        right = []
        for i in range(len(max_index) - 1):
            # Interval boundary: halfway between two adjacent maxima.
            next_index = int((max_index[i+1] - max_index[i]) / 2) + max_index[i]
            circle_range.append(next_index)
            q = q_flush[circle_range[i]:circle_range[i+1]]
            # De-noise each interval: clip everything below ratio * max.
            newq = q - max(q) * ratio
            newq[newq < 0] = 0
            # Duty cycle = width of the non-zero region / interval length.
            zhankongbi.append((max(max(np.where(newq > 0))) - min(min(np.where(newq > 0)))) / len(newq))
            max_index_now = np.argmax(newq)
            left.append(max_index_now - min(min(np.where(newq > 0))))
            right.append(max(max(np.where(newq > 0))) - max_index_now)
        # Triangle recognition (left side of zero width is not considered):
        # right-leaning intervals with duty cycle <= 0.3 ...
        right_zb = np.array(zhankongbi)[np.where(np.array(right) >= np.array(left))]
        right_count = np.sum(right_zb <= 0.3)
        # ... and left-leaning intervals with duty cycle <= 0.2 vote
        # for the right-angled shape.
        left_zb = np.array(zhankongbi)[np.where(np.array(left) > np.array(right))]
        left_count = np.sum(left_zb <= 0.2)
        if left_count + right_count >= len(zhankongbi) / 2:
            flag = 2  # right-angled triangle
        else:
            flag = 1  # isosceles triangle
        return [flag, zhankongbi]

    def smooth(self, a, WSZ):
        """Moving-average smoothing with shrinking windows at both edges.

        :param a: NumPy 1-D array containing the data to be smoothed
                  (use np.ravel()/np.squeeze() first if it is not 1-D).
        :param WSZ: smoothing window size; must be an odd number.
        :return: smoothed array of the same length as `a`.
        """
        out0 = np.convolve(a, np.ones(WSZ, dtype=int), 'valid') / WSZ
        r = np.arange(1, WSZ - 1, 2)
        # Edge samples are averaged over progressively smaller odd windows
        # so the output keeps the input's length.
        start = np.cumsum(a[:WSZ-1])[::2] / r
        stop = (np.cumsum(a[:-WSZ:-1])[::2] / r)[::-1]
        return np.concatenate((start, out0, stop))

    def peakLine(self, data, pindex):
        """Build a piecewise-linear envelope through peaks and the valleys
        between them.

        :param data: DataFrame with a 'close' column and an integer RangeIndex.
        :param pindex: list of peak indices into `data`.
        :return: Series 'node' holding the interpolated peak/valley line.
        """
        df = data[['close']].copy()
        # np.NaN alias was removed in NumPy 2.0; np.nan is the canonical name.
        df['node'] = np.nan
        # One valley (minimum close) between every pair of consecutive peaks;
        # sentinels 0 and len(data) bound the first and last intervals.
        pvIndex = [0] + pindex + [len(data)]
        vindex = [np.argmin(df.loc[pvIndex[i]:pvIndex[i+1], 'close'].values) + pvIndex[i]
                  for i in range(len(pvIndex) - 1)]
        pvIndex = sorted([0] + vindex + pindex + [len(data)])
        df.loc[df.index.isin(pvIndex), 'node'] = df.loc[df.index.isin(pvIndex), 'close']
        df['node'] = df['node'].interpolate(method='linear', axis=0)
        return df['node']

    def getPeakValley(self, data, p):
        """Extract peak/valley node rows and serialise them as JSON.

        :param data: DataFrame with 'date' and 'close' columns and an
                     integer RangeIndex.
        :param p: [peak_count, peak_indices] as returned by
                  find_peak_triangleNum.
        :return: JSON string. NOTE(review): the result is double-encoded
                 (json.dumps of DataFrame.to_json) — kept as-is for backward
                 compatibility with existing consumers; verify before changing.
        """
        df = data[['date', 'close']].copy()
        df['node'] = 0.
        pindex = p[1]
        pvIndex = [0] + pindex + [len(data) - 1]
        # One valley between consecutive peaks. The original used df.ix,
        # which was removed in pandas 1.0; .loc on an integer RangeIndex
        # performs the same inclusive label slice.
        vindex = []
        for i in range(len(pvIndex) - 1):
            ddff = df.loc[pvIndex[i]:pvIndex[i+1], 'close'].values
            vi = np.argmin(ddff) + pvIndex[i]
            vindex.append(vi)
        df['peak'] = [i if i in pindex else None for i in df.index]
        df['valley'] = [i if i in vindex else None for i in df.index]
        pvIndex = sorted([0] + vindex + pindex + [len(data) - 1])
        df['node'] = df.loc[df.index.isin(pvIndex), 'close']
        # NOTE(review): node rows with close <= 0 are dropped here —
        # presumably prices are always positive; confirm upstream.
        out = df.loc[df.node > 0., ['date', 'close', 'peak', 'valley']]
        out.reset_index(drop=False, inplace=True)
        out.rename(columns={'index': 'x'}, inplace=True)
        js = out.to_json()
        return json.dumps(js)
        
if __name__ == '__main__':
    pt = Pattern()
    pt.loadData()
    # Codes flagged for special attention in the future-info table.
    keyItems = pt.fi.loc[pt.fi[u'重点关注'] == '1', u'首码']
    rows = []
    for k in keyItems:
        d = pt.data.loc[pt.data['cod'] == k, ['date', 'close']]
        # ascending=True (was the non-boolean `1`); chronological order.
        d.sort_values(by='date', ascending=True, inplace=True)
        d.reset_index(drop=True, inplace=True)
        smooth_data = pt.smooth(d['close'], 5)
        p = pt.find_peak_triangleNum(smooth_data, 10)
        nodes = pt.getPeakValley(d, p)
        rows.append(pd.DataFrame([[k, '1d', nodes]], columns=['cod', 'freq', 'nodes']))
        print(p)
    # DataFrame.append was removed in pandas 2.0; collect rows and build
    # the result once with pd.concat (also avoids quadratic copying).
    if rows:
        pattern = pd.concat(rows, ignore_index=True)
    else:
        pattern = pd.DataFrame([], columns=['cod', 'freq', 'nodes'])
    pattern['_id'] = pattern['cod'] + '_' + pattern['freq']
    pattern['xtime'] = xtool.nowTime()
    mdbcom.saveBatch('pattern', pattern.to_dict(orient='records'))