import os
import redis
import pandas as pd
import pickle
import re
import time
import datetime
import warnings

warnings.filterwarnings('ignore')
import dxw
from stock import wencaicopy
from stock import chajian
import functools
import com

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

# time_ is presumably today's date string (used as YYYYMMDD in filenames
# below and parsed that way in today_vol_read) — confirm in com.time_start().
_1_, time_, _2_ = com.time_start()

# Project data root: four directory levels above this file.
path_db = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

# All cached files live under <root>/db/tushare_his/other.
_other_dir = os.path.join(path_db, "db", "tushare_his", "other")

# "vol_" is a date template (fill with .format(date)); the rest are fixed for today.
path = {
    "other": _other_dir,
    "vol_": os.path.join(_other_dir, '{}_vol.json'),
    "vol_today": os.path.join(_other_dir, '{}_vol.json'.format(time_)),
    "zt": os.path.join(_other_dir, '{}_zt.json'.format(time_)),
}


def test():
    """Smoke-test the local redis server: write two keys, read one back, print it."""
    connection = redis.Redis(
        connection_pool=redis.ConnectionPool(host='localhost', port=6379, db=0)
    )
    connection.set("country", "dalian")
    connection.set("city", "英国")
    city = connection.get("city").decode("utf-8")
    print(city)
    del connection
def xx():
    """Round-trip a DataFrame through redis via pickle and print the results.

    Also reads back the "city" key written by test(); will fail if it is absent.
    """
    frame = dxw.BK_analyse().zhangfu_main(10)
    conn = redis.Redis(host="localhost", port=6379, db=0)

    conn.set('test_df', pickle.dumps(frame))
    # pickle is fine here: the bytes come from our own redis instance.
    restored = pickle.loads(conn.get('test_df'))
    print(restored)

    city = conn.get("city").decode("utf-8")
    print(city)
#____________________________
def dxw_gegutosave(func):
    """Decorator: persist the DataFrame returned by ``func`` to <date>_vol.json.

    ``func`` must return a DataFrame.  The save date comes from the ``time_``
    keyword argument; when it is omitted or empty, com.time_saveandget() is
    used instead.  The decorated call now also returns the DataFrame
    (backward-compatible: previously it returned None).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time.time()
        res = func(*args, **kwargs)
        # Bug fix: kwargs["time_"] raised KeyError whenever the caller did not
        # pass time_ as a keyword (e.g. today_vol(num=2)); default to "".
        save_date = kwargs.get("time_", "")
        if save_date == "":
            save_date = com.time_saveandget()
        res.to_csv(path["vol_"].format(save_date), encoding="utf-8")
        print("运行时间{}s".format(time.time() - t1),
              "开始保存到tushare_his/other/{}_vol.json中".format(save_date))
        return res  # previously dropped; pass the DataFrame back to the caller
    return wrapper
@dxw_gegutosave
def today_vol(num=2, time_=""):
    """Query wencai for 1/5/30/120-day volume columns on ``time_`` and return
    the cleaned DataFrame (saved to disk by the decorator)."""
    query = "{}成交量，5日成交量，30日成交量，120日成交量".format(time_)
    raw = wencaicopy.searchMain(query, num)
    return chajian.today_vol_df(raw)

def today_vol_read(day=time_):
    """Load the <day>_vol.json volume table, fetching it first when stale.

    Lookup order:
      1. exact cached file for ``day``;
      2. the newest cached ``*_vol.json`` if it is within 7 days of ``day``;
      3. otherwise fetch via today_vol(num=200, time_=day) and retry.

    NOTE: the default ``day`` is bound to module-level ``time_`` at import time.
    """
    if day == "":
        # Read-only access to the module global; the original 'global time_'
        # declaration was unnecessary.
        day = time_
        print(day)

    # 1. Exact file for the requested day.
    if os.path.exists(path["vol_"].format(day)):
        df = pd.read_csv(path["vol_"].format(day), encoding="utf-8", index_col=0)
        print("当日读取完成", path["vol_"].format(day))
        return df

    # 2. Newest cached volume file, e.g. 20211024_vol.json.  Bug fix: filter to
    # *_vol.json — the old max(os.listdir(...)) could pick a *_zt.json entry
    # for a later date and then try to read a vol file that does not exist.
    vol_files = [f for f in os.listdir(path["other"]) if f.endswith("_vol.json")]
    if vol_files:
        newest = max(vol_files)
        date1 = datetime.datetime.strptime(newest[:8], "%Y%m%d")
        date2 = datetime.datetime.strptime(day[:8], "%Y%m%d")
        if abs(date1 - date2).days < 7:
            # Within 7 days — reuse the cached file instead of refetching.
            df = pd.read_csv(path["vol_"].format(newest[:8]), encoding="utf-8", index_col=0)
            print("7天以内读取完成", path["vol_"].format(newest[:8]))
            return df

    # 3. Nothing usable cached — fetch, then re-run the lookup.
    print("没有数据", "1.获取数据，2.然后重新运行，走ifos.path.exists")
    today_vol(num=200, time_=day)
    return today_vol_read(day=day)
# ____________________________
def ths_price(num=2, time_=""):
    """Query wencai for 20/50/250-day moving averages on ``time_``; print the
    query and the raw result, then return the DataFrame unmodified."""
    query = "{}，20日均线50日均线250均线".format(time_)
    print(query)
    result = wencaicopy.searchMain(query, num)
    print(result)
    return result

# ____________________________
def zt(time_=time_):
    """Fetch the limit-up ("涨停") list with reasons and funds for ``time_`` and
    save it to today's <date>_zt.json (CSV content despite the extension).

    NOTE: the default is the module-level ``time_`` bound at import time, and
    the output path is always today's path["zt"] regardless of ``time_``.
    """
    query = "{}涨停和涨停原因及资金".format(time_)  # e.g. 20211021涨停和涨停原因及资金
    raw = wencaicopy.searchMain(query, 10)
    chajian.zt_df(raw).to_csv(path["zt"], encoding="utf-8")
# ____________________________
def his_tushare_today():
    """Placeholder — presumably meant to pull today's tushare history; not implemented yet."""
    pass
"""
"""
if __name__ == '__main__':
    # test()
    # xx()
    # 1. today's limit-up list
    # zt()
    # 2. "volume, 5-day, 30-day, 120-day volume" — can substitute for MA120
    # today_vol(num=2, time_="20210109")
    df_today = today_vol_read()
    print(df_today)

    # today_vol(num=5, time_="20211024")
    today_vol_read(day="20211026")
    ths_price(num=2, time_="20211109")
    # 3. popularity ranking
    # name="20210909人气概念板块排名"
    # print(wencaicopy.searchMain(name, 10))

