
import os
import requests
from fuzzywuzzy import fuzz
import pandas as pd
import __init__
import other
import dxw
import start
from get_data import Altas_db

"""
1.增加板块信息
2.增加人气
3.增加换手率"""

# Resolve the run's date window via the project helper.
start_time, today_time, _hour_= other.time_start()  # returns (start_time, today_time, hour), e.g. ('20200731', '20200820', '16:12')

# #1.1读取dxw热门板块
# # path=path["gegu_zhangfu_bankuai"].format(today_time)
# dxw_data=Altas_db._readdf("dxw_bk","{}_bk".format(today_time),dtype="dxw")
# print(dxw_data)
# if dxw_data is None:
#     dxw_data = dxw.BK_analyse().zhangfu_main(10)  # 个股涨幅
#
# dxw_bk_list=dxw_data.板块.drop_duplicates()
# print(dxw_bk_list)#[光伏,锂电池]
# #1.2读取ths板块，并模糊匹配，保存
# # ths_bk_df=pd.read_csv('db_temp/ths_bk/gnbk_{}.csv'.format("gn"))
# ths_bk_df=Altas_db._readdf("ths","gnbk",dtype="dxw")
# ths_bk_list=ths_bk_df.Name
#
# list_=[]
# for i in dxw_bk_list:
#     for i_ths in ths_bk_list:
#         score=fuzz.token_sort_ratio(i, i_ths)
#         if score>50:
#             #print(i, i_ths,score)
#             list_.append(i_ths)
# print(list_)
# test=pd.DataFrame(data=list_,columns=["bk_name"])
# Altas_db._save_mongo_db(test,"dxw_bk_ans","{}_1.2_ths_bk_list.json".format(today_time))
# # test.to_csv("db_temp/ans_/{}_1.2_ths_bk_list.json".format(today_time), encoding="utf-8")
# #1.3 ths板块读取（带详细个股），合并1.2
# #合并，index按顺序排列
# #重命名
# # gn_df= pd.read_csv("db_temp/ths_bk/筛选数据_{}.json".format("gn"), encoding="utf-8", index_col=0)
# gn_df=Altas_db._readdf("ths","gnbk_gegu",dtype="dxw")
# _df=pd.merge(gn_df, test, how='inner', on='bk_name')
# _df.index=_df.bk_name
# _df=_df.loc[list_]
# _df= _df.rename(columns={'S_ID':'code'})
# _df['code'] = _df['code'].astype(int).astype(str).str.zfill(6)  # 1补缺
# Altas_db._save_mongo_db(_df,"dxw_bk_ans","{}_1.3_ths.json".format(today_time))
# print(_df.info())
# b=_df.groupby('code').apply(lambda d: tuple(d.index) if len(d.index) > 1 else None).dropna()
# print(len(b))
# Altas_db._save_mongo_db(b,"dxw_bk_ans","b.json")
#
# dict_df={k: tuple(d.index) for k, d in _df.groupby('code') if len(d) > 1}
# print(dict_df)
# print(sorted(dict_df.items(),key = lambda x:len(x[1]),reverse = True))
# a=_df[_df.code.duplicated(keep='first')]
# Altas_db._save_mongo_db(a,"dxw_bk_ans","a.json")
# print(len(a))
# rixiaojie=Altas_db._readdf("ts","3.5日小结")
# _df_1=pd.merge(_df, rixiaojie, how='inner', on='code')
# #_df_1.to_csv("db_temp/ans_/{}_1.3.1_ths.txt".format(today_time), encoding="utf-8")
# print(_df_1.info())
# _df_2=_df_1.drop_duplicates(["code"],keep="first")
# #_df_2.to_csv("db_temp/ans_/{}_1.3.1_ths.txt".format(today_time), encoding="utf-8")
# Altas_db._save_mongo_db(_df_1,"dxw_bk_ans","{}_1.3.1_ths.txt".format(today_time))
#
#
# #————————————————————————————————————————
# # import __init__
#
# df=start.dfcf()
# df1=start.ths()
# #todo NLP文字处理
# #产靠文献：https://blog.csdn.net/asialee_bird/article/details/96454544
# def getSecids(dfcf:pd.DataFrame) ->str:
#     df = ",".join([str(i) for i in dfcf.sc.tolist()])
#     url = "https://vipmoney.eastmoney.com/collectapi/ranking/GubaHotTopicNew?code="+df+",?v=011036183734035743"
#     print(url)
#     headers={
#         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
#         "Accept-Encoding": "gzip, deflate, br",
#         "Accept-Language": "zh-CN,zh;q=0.9",
#         "Cache-Control": "max-age=0",
#         "Connection": "keep-alive",
#         #"Cookie": "qgqp_b_id=653ecb63ecd9217267ac2ffd8932a0a2; HAList=a-sh-601728-%u4E2D%u56FD%u7535%u4FE1; em_hq_fls=js; intellpositionL=1079.19px; st_si=64104378274450; _qddaz=QD.2gtapf.nqyrx8.kwex5gx1; cowCookie=true; st_asi=delete; intellpositionT=1055px; st_pvi=93875626554371; st_sp=2021-09-03%2019%3A41%3A08; st_inirUrl=https%3A%2F%2Fguba.eastmoney.com%2F; st_sn=5; st_psi=20211125203124772-119122306361-0448153457",
#         "Host": "vipmoney.eastmoney.com",
#         "Referer": "https://www.ricequant.com/",
#
#         "sec-ch-ua-mobile": "?0",
#         "sec-ch-ua-platform": "Windows",
#         "Sec-Fetch-Dest": "document",
#         "Sec-Fetch-Mode": "navigate",
#         "Sec-Fetch-Site": "cross-site",
#         "Sec-Fetch-User": "?1",
#         "Upgrade-Insecure-Requests": "1",
#         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36",
#
#     }
#
#     response = requests.get(url, headers=headers)
#     print(response.text)
# # getSecids(df)
# df=[x[2:8] for x in df['sc'].tolist()]
# print(df)
# print(df1.code.tolist())
# list_both=list(set(df) & set(df1.code.tolist()))
# # print(len(list_both))
# #----------------------------------------
# df=Altas_db._readdf("ts","2.his数据下载_fast1")
#
# n = list(set(df.trade_date))
# print(len(n))
# #todo ar br /boll
# #----------------------------------------
# --- Price filter: keep codes whose "250日_x" rolling max exceeds "250日_y" ---
# _read_ths_volMax returns two DataFrames (project helper); join them on stock code.
tvp_df0, tvp_df1 = Altas_db._read_ths_volMax(dtype="pri", model="TwoMaxdf")
print(tvp_df0.info(), tvp_df1.info())
pri_merge = pd.merge(tvp_df0, tvp_df1, how='inner', on='code')
pri_mask = pri_merge["250日_x"] > pri_merge["250日_y"]  # suffixes _x/_y come from the merge
pm = pri_merge[pri_mask]
# print(len(pm))

# --- Volume filter: same idea using the 120-day ("120日") columns ---
tvv_df0, tvv_df1 = Altas_db._read_ths_volMax(dtype="vol", model="TwoMaxdf")
print(tvv_df0.info(), tvv_df1.info())
vol_merge = pd.merge(tvv_df0, tvv_df1, how='inner', on='code')
vol_mask = vol_merge["120日_x"] > vol_merge["120日_y"]
vm = vol_merge[vol_mask]
print(len(vm))

# Codes passing BOTH the price and the volume filter.
pp = pd.merge(pm, vm, how='inner', on='code')
print(len(pp))
# BUG FIX: the original used "pp".format(today_time) — no placeholder, so
# .format() was a no-op and every run overwrote the same "pp" document.
# Embed the date like every other save in this file (e.g. "{}_1.3_ths.json").
Altas_db._save_mongo_db(pp, "dxw_bk_ans", "{}_pp".format(today_time))

# df_1_1=Altas_db._readdf("ts","1.数据下载")
# merge1=pd.merge(df_1_1, ths_vol_pri_df, how='inner', on='code')
#
# ths_vol_vol_df=Altas_db._read_ths_volMax(dtype="vol")
# m2=pd.merge(merge1, ths_vol_vol_df, how='inner', on='code')
# print(len(m2),m2.head())
#
# cho1=m2.trade>0.9*m2["250日"]
# cho2=(m2.volume>m2["120日"])
# cho3=(m2.turnoverratio>1)
# print(cho2)
# df=m2[cho1 & (cho2|cho3)]
# # df[df.volume>]
# print(len(df),df)

