"""
import logging

logging.basicConfig(format='%(levelname)s : %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.WARNING)
logger = logging.getLogger(__name__)
"""
"""
colors = ['red','blue','red','green','blue']
result = {}

for element in colors:
    if result.get(element) is None:
        result[element] = 1
    else:
        result[element] += 1
print(result)
"""
"""
from collections import Counter
colors = ['red','blue','red','green','blue']
c = Counter(colors)
print('result = ',c)
"""
"""
assert 'train' in ['train', 'stack'], 'This is Hint'
"""
"""
mode = 'stack' if False else 'NoStack'
print(mode)
"""
"""
import os
print(os.path.join('..','filename','place','place1_1'))
"""
"""
import numpy as np
set_map = {(2,6):[1,2,3],

           }

keys = set_map.keys()

list_keys = list(keys)

x,y = np.array(list(set_map.keys())).T
print(x)
print(y)
"""
"""
value , vv = np.array(keys)

print(keys)
"""
"""
tensor.view

import torch

tensor_value_0 = torch.randn(1, 2, 10)
tensor_value_1 = tensor_value_0.view(1, -1)


print(tensor_value_1.size())
print(tensor_value_1)
print(tensor_value_0)
"""  # tensor.view
"""
import torch
tensor_0 = torch.randn(1, 3, 2)
tensor_1 = torch.randn(1, 3, 2)
tensor_combine = [tensor_0, tensor_1]
print(tensor_combine)
print(torch.cat(tensor_combine, -1).shape)
print(torch.cat(tensor_combine, 1).shape)
"""  # torch.cat
"""
import torch
# tensor_0 = torch.randn(5, 4)
# print(tensor_0)
# print(tensor_0[:, 2:])
value = [1,2,3]
print(value[1:])

"""  # numpy[:]
"""
value = (1 , 2 ,3 , 5)
print(value)
"""  # ()
"""
import torch
from torch.utils.data import DataLoader, TensorDataset
#
tensor_0 = torch.randn(10, 3)
tensor_1 = torch.randn(10)
#print(tensor_0, tensor_1)
MyDataset = TensorDataset(tensor_0, tensor_1)

# for idx,(tensor_0_train_ids , tensor_1_train_id) in enumerate(MyDataset):
#     print(idx)
#     print(tensor_0_train_ids)

MyDataLoader = DataLoader(MyDataset, batch_size=2)
for idx, (tensor_0_element, tensor_1_element) in enumerate(MyDataLoader):
    print(tensor_0_element, tensor_1_element)
    print('________________________')
"""  # enumerate TensorDataset  DataLoader
"""
import torch
value = torch.randn(6,34)
print(value)
value_max =torch.max(value,1)
print(value_max)
"""  # torch.max()
"""
from transformers import BertTokenizer, BertForQuestionAnswering
import torch

import transformers
print(transformers.__version__)

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')

question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, text, return_tensors='pt')

start_positions = torch.tensor([1])
end_positions = torch.tensor([3])

outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
loss = outputs.loss
start_scores = outputs.start_logits
end_scores = outputs.end_logits
"""  # BertForQuestionAnswering
"""
import collections
Girl = collections.namedtuple('Girl',['idx','Name','age','sexly'])
Girls = []
for i in range(5):
    Girls.append(Girl(idx=i,Name='Kice',age=15,sexly=0.86))
print(Girls[0][0])
"""  # collections.namedtuple
"""import torch
x = torch.tensor([1, 2, 3], dtype=torch.long)[None, ...]
y = torch.tensor([1, 2, 3], dtype=torch.long)
z = torch.tensor([1, 2, 3], dtype=torch.long)[...,None]

print(x,'\n',z)"""  # tensor[None, ...]
"""
import torch

tensor_value_0 = torch.randn(10, 10, 10)
print(tensor_value_0)
"""  # view dimension
"""
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0,10,1)
y = np.array([0.5,0.8,0.3])
y_d = np.array([0,1,0])

plt.plot(y_d,y,'o')
plt.show()
"""  # 散点图
"""
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter

with open('Value.json', 'r', encoding='utf8')as fp:

    json_data = json.load(fp)
    #print(json_data)
    print(json_data['Value'])
    x =[]
    y =[]

    for element in json_data['Value']:
        if(element['CanBuy']==False):
            x.append(element['RSI_6'])
            if element['CanBuy'] == True:
                int_i = 1
            else:
                int_i = 0
            y.append(int_i)
    plt.plot(sorted(x), y, 'o')
    print(len(x))
    x_major_locator = MultipleLocator(10)
    y_major_locator = MultipleLocator(8)
    ax = plt.gca()
    ax.xaxis.set_major_locator(x_major_locator)
    ax.yaxis.set_major_locator(y_major_locator)
    #plt.ylim(0.0,1.0)
    #plt.xlim(0.0,1.0)
    plt.show()

#json.loads()
"""  # Load Json
"""
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import json
import os


def all_path( dirname):
    result = []
    for maindir, subdir, file_name_list in os.walk(dirname):
        for filename in file_name_list:
            apath = os.path.join(maindir, filename)
            result.append(apath)
    return result
paths = all_path('G:\Gitee\learn_ue4\Json\MathResult')

for element_path in paths:
    fig = plt.figure()
    ax1 = plt.axes(projection='3d')

    with open(element_path, 'r', encoding='utf16')as fp:

        json_data = json.load(fp)
        #print(json_data)
        #print(json_data['Value'])
        x1 =[]
        z1 =[]

        x2 = []
        z2 = []

        #y=np.ones(len(json_data['Value']))
        y1 = []
        y2 =[]

        x3= []
        y3=[]
        z3=[]

        x4 = []
        y4 = []
        z4 = []

        x5 =[]
        y5 =[]
        z5 =[]


        a1=[]
        a2=[]
        a3=[]
        a4=[]
        a5=[]
        a6=[]

        x6=[]
        y6=[]
        z6=[]
        can_buy_y =0
        no_can_buy_y = 0
        for element in json_data['Value']:

            if element['Date'] == '20210813':
                x3.append(float(element['RSI_6']))
                y3.append(float(element['MeanValueRelation']))
                z3.append(float(element['RSI_14']))
                a3.append(float(element['on9'])-1)
                ax1.set_title(element_path + str(float(element['on9'])-1), fontsize=12, color='r')
            else:
                if element['CanBuy']=='True' and float(element['on9']) - 1 != -1 and float(element['on9']) - 1 <= 0.02:

                    x1.append(float(element['RSI_6']))
                    z1.append(float(element['MeanValueRelation']))
                    y1.append(float(element['RSI_14']))
                    a1.append(float(element['on9'])-1)
                    can_buy_y +=1
                elif element['CanBuy']=='False'and float(element['on9']) - 1 <= 0.02:
                    if float(element['on9'])-1 != -1:
                        x2.append(float(element['RSI_6']))
                        y2.append(float(element['MeanValueRelation']))
                        z2.append(float(element['RSI_14']))
                        a2.append(float(element['on9'])-1)
                        no_can_buy_y+=1
                elif element['CanBuy'] == 'Collapse':
                    if float(element['RSI_6'])<=1:
                        x4.append(float(element['RSI_6']))
                        y4.append(float(element['MeanValueRelation']))
                        z4.append(float(element['RSI_14']))
                        a4.append(float(element['on9'])-1)

                elif element['CanBuy'] == 'Unkonw':
                    if float(element['RSI_6'])<=1:
                        x5.append(float(element['RSI_6']))
                        y5.append(float(element['MeanValueRelation']))
                        z5.append(float(element['RSI_14']))
                        a5.append(float(element['on9'])-1)
                elif element['CanBuy'] == 'Collapse_Fail':
                    if float(element['RSI_6'])<=1:
                        x6.append(float(element['RSI_6']))
                        y6.append(float(element['MeanValueRelation']))
                        z6.append(float(element['RSI_14']))
                        a6.append(float(element['on9'])-1)

    #y1=np.zeros(can_buy_y)
    #y2=np.zeros(no_can_buy_y)


    #z = np.zeros(can_buy_y)
    #z2 = np.ones(no_can_buy_y)


    plt.xlabel('RSI_6')
    plt.ylabel('MeanValueRelation')
    ax1.scatter3D(x1,y1,z1,cmap='Blues',c='r')
    ax1.scatter3D(x2,y2,z2,cmap='Blues',c='g')
    ax1.scatter3D(x3,y3,z3,cmap='Blues',c='b' ,norm = '1')
    #ax1.scatter3D(x4,y4,a4,cmap='Blues',c='r')
    #ax1.scatter3D(x6,y6,a6,cmap='Blues',c='g')
   # ax1.scatter3D(x5,y5,z5,cmap='Blues',c='b')
    #ax1.plot3D(x,y,z,'gray')
    plt.show()
    """  # plot
"""
import os, sys
sys.path.append(os.path.abspath("G:/python_example/DowhyPath"))

import numpy as np
import pandas as pd

import dowhy
from dowhy import CausalModel
import dowhy.datasets

data = dowhy.datasets.linear_dataset(beta=10, # beta 表示真实的因果效应
        num_common_causes=5, # 混杂因子，用 W 表示，作用于干预变量和结果变量
        num_instruments=2, # 工具变量，用 Z 表示，作用于干预变量（间接影响结果）
        num_effect_modifiers=1, # 效果修改变量，用 X 表示，作用于结果变量
        num_samples=10000, # 样本数量
        treatment_is_binary=True, # 干预为二元变量，用 v 表示
        num_discrete_common_causes=1)
df = data["df"] # DoWhy 使用 pandas 的 dataframe 来载入数据
print(df.head())
print(data["dot_graph"]) # 还可以输出 gml_graph，内容一致只是表达形式不同

# With graph

#model=CausalModel(
#        data = df,
#        treatment=data["treatment_name"],
#        outcome=data["outcome_name"],
#        graph=data["gml_graph"]
#        )

model= CausalModel(
        data=df,
        treatment=data["treatment_name"],
        outcome=data["outcome_name"],
        instruments=data["instrument_names"], # 官网漏了这一行
        common_causes=data["common_causes_names"],
        effect_modifiers=data["effect_modifier_names"])
# INFO:dowhy.causal_model:Model to find the causal effect of treatment ['v0'] on outcome ['y']
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))

"""  # 因果

"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import talib as ta
ta_fun=ta.get_function_groups()
ta_fun.keys()

import tushare as ts
pro = ts.pro_api('d0ea4a77478cd666ab7aab6d46fb0082b9146722bd4a2a4fe25f47a1')

df = pro.daily(ts_code='000001.SZ', start_date='20200701')

df.index = pd.to_datetime(df.trade_date)

df_ma=pd.DataFrame(df.close)
a,b,c =ta.MACD(df.close.values[::-1])
mypd = pd.DataFrame(a,
    index = df.trade_date.values[::-1],
                        columns=['value'])
print(mypd)
print(mypd.loc['20210706','value'])
#print(mypd.loc[20210102,'value'])
# print(a)
# print(b)
# print(c)
# df_ma.tail()
# df_ma.loc['2020-01-01':].plot(figsize=(16,6))
#
# ax = plt.gca()
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# plt.title('上证指数各种类型移动平均线',fontsize=15)
# plt.xlabel('')
# plt.show()
"""  # MACD

"""

import numpy as np
import math

def analyse(Hist, close):
    hist_close = zip(Hist, close)
    current = 0
    holded = False
    close_onhold = 0;
    get = []
    get_count = 0
    current_close =0
    get_date =[]
    for element_Hist, element_close in hist_close:
        if math.isnan(element_Hist):
            continue
        if element_Hist >= current:
            if holded:
                # next
                holded = True  # becase next
            else:
                holded = True
                
                close_onhold = element_close
                get_count += 1

                current = element_Hist
        else:
            if holded:
                # release
                get_percent = (element_close - close_onhold) / close_onhold
                if get_percent>0:
                    get.append(get_percent)
                    holded = False

                current = element_Hist
            else:

                current = element_Hist
        current = element_Hist
        current_close = element_close
    if holded:
        get_percent = (element_close - close_onhold) / close_onhold
        get.append(get_percent)
    return np.sum(get),get_count


import baostock as bs
import pandas as pd
import talib as ta
bs.login()


rs = bs.query_history_k_data('sh.600009',
                             "date,code,close,tradeStatus",
                             start_date='2020-01-01',
                             #end_date='2021-01-01',
                             frequency="d", adjustflag="3")

result_list = []
while (rs.error_code == '0') & rs.next():

    result_list.append(rs.get_row_data())
df = pd.DataFrame(result_list, columns=rs.fields)
#print(df)

df2 = df[df['tradeStatus'] == '1']
dif, dea, hist = ta.MACD(df2['close'].astype(float).values, fastperiod=12, slowperiod=26, signalperiod=9)
mypd = pd.DataFrame(df2['close'].astype(float).values,
    index = df2.date.values,
                        columns=['value'])
#print(mypd)
get_s, count = analyse(hist,df2['close'].astype(float).values)
print(count)
print(get_s)
"""
#"""
import numpy as np
# Pair up two parallel 1-D lists into one 2-D list of [value, label] rows.
list1 = [1, 2, 3, 4, 5, 6]
list2 = ['a', 'b', 'c', 'd', 'e', 'f']
# zip walks both lists in lockstep — no index bookkeeping needed;
# list() turns each (value, label) tuple into a mutable row, matching
# the original output exactly.
list3 = [list(pair) for pair in zip(list1, list2)]

print(list3)

# Unpack each [value, label] row back into two names.
for x, y in list3:
    print(x, y)

def Nto1(N):
    """Merge N parallel 1-D sequences into one 2-D list.

    Generalizes the two-list pairing above to any number of sequences:
    ``Nto1([[1, 2], ['a', 'b']])`` returns ``[[1, 'a'], [2, 'b']]``.

    Args:
        N: An iterable of parallel 1-D sequences.

    Returns:
        A list of rows, where row ``i`` collects element ``i`` from every
        input sequence.  Truncates to the shortest sequence (``zip``
        semantics).

    NOTE(review): the original body was left unfinished (inner loop had no
    body — a SyntaxError); this implementation follows the intent stated in
    the trailing comment and the two-list example above.
    """
    # zip(*N) transposes the sequences; each tuple becomes one output row.
    return [list(row) for row in zip(*N)]
#"""  # merge N parallel 1-D lists, element-wise, into one 2-D list