import pickle,os,zlib

from jili.core.convert import str2datetime
from jili.core.printlog import print
from jili.tool.state import data_url
import os,datetime
from jili.tool import get_createtimekey

import asyncio
import aiofiles
# Record delimiter appended after every pickled record in append-mode files.
# NOTE(review): this byte sequence can also occur inside a pickled payload;
# the load_* readers re-assemble records that were split spuriously.
add_flag=b"0|||"
async def async_write_to_file_add(file_path, content):
    """Append one pickled record (terminated by ``add_flag``) to *file_path*.

    A ``.pkl`` suffix is added when missing and parent directories are
    created on demand.

    :param file_path: target path; ``.pkl`` is appended when missing
    :param content: any picklable object
    """
    if not file_path.endswith(".pkl"):
        file_path = file_path + ".pkl"
    url0 = os.path.abspath(file_path)
    # exist_ok avoids the race between an existence check and makedirs
    os.makedirs(os.path.dirname(url0), exist_ok=True)
    async with aiofiles.open(file_path, mode="ab") as f:
        # add_flag marks the end of this record for the load_*_add readers
        await f.write(pickle.dumps(content) + add_flag)
def save_async_add(data, url):
    """Blocking wrapper: run the async append-writer for *data* at *url*."""
    coro = async_write_to_file_add(url, data)
    asyncio.run(coro)
def save_add(data, url, iszip=False):
    """Append *data* as one pickled (optionally zlib-compressed) record.

    Each record is terminated with ``add_flag`` so load_add/iter_load_add
    can split the file back into records later.

    :param data: any picklable object
    :param url: target path; ``.pkl`` is appended when missing
    :param iszip: compress the pickled payload with zlib before writing
    """
    file_path = url if url.endswith(".pkl") else url + ".pkl"
    url0 = os.path.abspath(file_path)
    # exist_ok avoids the race between an existence check and makedirs
    os.makedirs(os.path.dirname(url0), exist_ok=True)
    # serialize before opening the file so a pickling error leaves it untouched
    s = pickle.dumps(data)
    if iszip:
        s = zlib.compress(s)
    with open(file_path, mode="ab") as f:
        f.write(s + add_flag)
def load_add(url, iszip=False):
    """Load every record previously written with :func:`save_add`.

    The file is split on ``add_flag``.  Because that delimiter can also
    occur inside a pickled payload, fragments that fail to unpickle are
    re-joined with the delimiter and retried together with the next one.

    :param url: source path; ``.pkl`` is appended when missing
    :param iszip: payloads were zlib-compressed when written
    :return: list of unpickled records (empty when missing/unreadable)
    """
    rst = []
    file_path = url
    if file_path[-4:] != ".pkl":
        url0 = file_path + ".pkl"
    else:
        url0 = file_path
    url0 = os.path.abspath(url0)
    if os.path.exists(url0):
        if os.access(url0, os.R_OK):
            # Bug fix: open the normalized url0 (with ".pkl"), not the raw
            # argument — paths given without the suffix used to fail here
            # even though the existence check above passed.
            with open(url0, mode='rb') as f:
                content = f.read()
                content = content.split(add_flag)
                prei = b""  # accumulated fragments of a split record
                for i in content:
                    if i != b"":
                        try:
                            if iszip:
                                i = zlib.decompress(i)
                            ii = pickle.loads(i)
                            if prei != b"":
                                # a previously-split record is now complete
                                if iszip:
                                    prei0 = zlib.decompress(prei)
                                else:
                                    prei0 = prei
                                ii0 = pickle.loads(prei0)
                                rst.append(ii0)
                                prei = b""
                            rst.append(ii)
                        except Exception:
                            # fragment is not a full record: the delimiter
                            # appeared inside the payload; re-join and retry
                            if prei == b"":
                                prei = i
                            else:
                                prei = prei + add_flag + i
        else:
            print("没有读权限", url0)
    else:
        print(file_path, url0, "文件不存在")
    return rst


def iter_load_add(url,batch_size=None,buffer_size=1024*1024,iszip=False):
    """Stream records written by save_add() without loading the whole file.

    The file is read in *buffer_size* chunks and split on the ``add_flag``
    delimiter.  The delimiter can also occur inside a pickled payload, so a
    fragment that fails to unpickle is re-joined with the delimiter and
    retried together with the following fragment(s).

    Parameters:
        url: path of the append-mode pickle file (opened exactly as given;
            unlike load_add no ".pkl" suffix is appended — TODO confirm
            that is intentional).
        batch_size: None yields each record as soon as it is decoded;
            otherwise records accumulate and are yielded in batches once
            at least batch_size are buffered.
        buffer_size: number of bytes read from disk per iteration.
        iszip: set True when records were written with save_add(iszip=True).

    Yields:
        The unpickled records, in file order.
    """
    file_path=url
    delimiter=add_flag
    buffer = b""
    delimiter_len = len(delimiter)
    batch=[]
    prei = b""  # fragments of a record that was split by a spurious delimiter
    with open(file_path, "rb") as f:
        while True:
            # Read the next chunk from disk (size controlled by buffer_size).
            data = f.read(buffer_size)
            if not data:  # end of file reached
                # Whatever remains in the buffer is the final record (it has
                # no trailing delimiter — e.g. a partially-written tail).
                if buffer:
                    try:
                        if iszip:
                            buffer = zlib.decompress(buffer)
                        ii = pickle.loads(buffer)
                        if prei != b"":
                            # A previously-split record is now complete.
                            if iszip:
                                prei0 = zlib.decompress(prei)
                            else:
                                prei0 = prei
                            ii0 = pickle.loads(prei0)
                            if batch_size is None:
                                yield ii0
                            else:
                                batch.append(ii0)
                            prei = b""
                        if batch_size is None:
                            yield ii
                        else:
                            batch.append(ii)
                    except Exception as e:
                        # Incomplete record: keep it for re-joining.
                        # NOTE(review): at EOF a non-empty prei is silently
                        # dropped — confirm whether corrupt tails should be
                        # reported instead.
                        if prei == b"":
                            prei = buffer
                        else:
                            prei = prei + add_flag + buffer
                break

            buffer += data

            # Split out every complete record currently in the buffer.
            while True:
                pos = buffer.find(delimiter)
                if pos == -1:
                    break  # no full delimiter in the buffer; read more data

                # The bytes before the delimiter form one candidate record.
                chunk = buffer[:pos]
                try:
                    if iszip:
                        chunk = zlib.decompress(chunk)
                    ii = pickle.loads(chunk)
                    if prei != b"":
                        # A previously-split record is now complete.
                        if iszip:
                            prei0 = zlib.decompress(prei)
                        else:
                            prei0 = prei
                        ii0 = pickle.loads(prei0)
                        if batch_size is None:
                            yield ii0
                        else:
                            batch.append(ii0)
                        prei = b""
                    if batch_size is None:
                        yield ii
                    else:
                        batch.append(ii)
                except Exception as e:
                    # The delimiter appeared inside the payload: re-join the
                    # fragment and retry with the next one.
                    if prei == b"":
                        prei = chunk
                    else:
                        prei = prei + add_flag + chunk
                # Drop the consumed record plus its delimiter from the buffer.
                buffer = buffer[pos + delimiter_len:]
            # Flush a full batch (note: `if batch_size:` treats 0 like None).
            if batch_size:
                if len(batch) >= batch_size:
                    for chunk in batch:
                        yield chunk
                    batch = []
    # Flush any records still buffered when the file ends mid-batch.
    if batch_size is not None:
        for chunk in batch:
            yield chunk
        batch = []

async def async_read_from_file_add(file_path):
    """Async variant of :func:`load_add` (no zlib support).

    Splits the file on ``add_flag`` and re-joins fragments where the
    delimiter occurred inside a pickled payload.

    :param file_path: source path; ``.pkl`` is appended when missing
    :return: list of unpickled records (empty when missing/unreadable)
    """
    rst = []
    if file_path[-4:] != ".pkl":
        url0 = file_path + ".pkl"
    else:
        url0 = file_path
    url0 = os.path.abspath(url0)
    if os.path.exists(url0):
        if os.access(url0, os.R_OK):
            # Bug fix: open the normalized url0 (with ".pkl"), not the raw
            # argument — paths given without the suffix used to fail here
            # even though the existence check above passed.
            async with aiofiles.open(url0, mode='rb') as f:
                content = await f.read()
                content = content.split(add_flag)
                prei = b""  # accumulated fragments of a split record
                for i in content:
                    if i != b"":
                        try:
                            ii = pickle.loads(i)
                            if prei != b"":
                                # a previously-split record is now complete
                                ii0 = pickle.loads(prei)
                                rst.append(ii0)
                                prei = b""
                                print("拼接消息")
                            rst.append(ii)
                        except Exception as e:
                            print(e, i)
                            # delimiter occurred inside the payload: re-join
                            if prei == b"":
                                prei = i
                            else:
                                prei = prei + add_flag + i
        else:
            print("没有读权限", url0)
    else:
        print(file_path, url0, "文件不存在")
    return rst
def load_async_add(url):
    """Blocking wrapper: run the async record reader and return its records."""
    return asyncio.run(async_read_from_file_add(url))
async def async_write_to_file(file_path, content, iszip=False):
    """Pickle *content* to *file_path* asynchronously (whole-file overwrite).

    A ``.pkl`` suffix is added when missing and parent directories are
    created on demand.

    :param iszip: zlib-compress the pickled payload before writing
    """
    if not file_path.endswith(".pkl"):
        file_path = file_path + ".pkl"
    url0 = os.path.abspath(file_path)
    # exist_ok avoids the race between an existence check and makedirs
    os.makedirs(os.path.dirname(url0), exist_ok=True)
    async with aiofiles.open(file_path, mode="wb") as f:
        s = pickle.dumps(content)
        if iszip:
            s = zlib.compress(s)
        await f.write(s)
def save_async(data, url, iszip=False):
    """Historical async entry point; now delegates to the blocking save()."""
    save(data, url, iszip)
async def async_read_from_file(file_path, iszip=False):
    """Async counterpart of :func:`load`: unpickle the whole file, or None.

    :param file_path: source path; ``.pkl`` is appended when missing
    :param iszip: payload was zlib-compressed when written
    :return: the unpickled object, or None when missing/unreadable
    """
    rst = None
    if file_path[-4:] != ".pkl":
        url0 = file_path + ".pkl"
    else:
        url0 = file_path
    url0 = os.path.abspath(url0)
    if os.path.exists(url0):
        if os.access(url0, os.R_OK):
            # Bug fix: open the normalized url0 (with ".pkl"), not the raw
            # argument — paths given without the suffix used to fail here
            # even though the existence check above passed.
            async with aiofiles.open(url0, mode='rb') as f:
                content = await f.read()
                if iszip:
                    content = zlib.decompress(content)
                rst = pickle.loads(content)
        else:
            print("没有读权限", url0)
    else:
        print(file_path, url0, "文件不存在")
    return rst
def load_async(url, iszip=False):
    """Historical async entry point; now delegates to the blocking load()."""
    return load(url, iszip)

def save_v1(data, url="data", iszip=False):
    """Pickle *data* to *url* (legacy variant of :func:`save`).

    A ``.pkl`` suffix is added when missing and parent directories are
    created on demand.

    :param data: any picklable object
    :param url: target path; ``.pkl`` is appended when missing
    :param iszip: zlib-compress the payload before writing
    :return: absolute path actually written
    """
    if not url.endswith(".pkl"):
        url = url + ".pkl"
    url0 = os.path.abspath(url)
    # exist_ok avoids the race between an existence check and makedirs
    os.makedirs(os.path.dirname(url0), exist_ok=True)
    with open(url0, "wb") as f:
        s = pickle.dumps(data)
        if iszip:
            s = zlib.compress(s)
        f.write(s)
    return url0
def load_v1(url="data", iszip=False):
    """Read an object written by save_v1(); None when missing or unreadable."""
    url0 = url if url[-4:] == ".pkl" else url + ".pkl"
    url0 = os.path.abspath(url0)
    # guard clauses instead of nested if/else
    if not os.path.exists(url0):
        print(url, url0, "文件不存在")
        return None
    if not os.access(url0, os.R_OK):
        print("没有读权限", url0)
        return None
    with open(url0, "rb") as f:
        payload = f.read()
    if iszip:
        payload = zlib.decompress(payload)
    return pickle.loads(payload)
def save(data, url="data", iszip=False):
    """Pickle *data* to *url* (``.pkl`` ensured), optionally zlib-compressed.

    Parent directories are created on demand.  Returns the absolute path
    that was written.
    """
    target = os.path.abspath(url if url.endswith(".pkl") else url + ".pkl")
    os.makedirs(os.path.dirname(target), exist_ok=True)
    payload = pickle.dumps(data)
    if iszip:
        payload = zlib.compress(payload)
    with open(target, "wb") as f:
        f.write(payload)
    return target
def load(url="data", iszip=False):
    """Unpickle the object stored at *url*; None on any recognized failure."""
    url0 = url if url.endswith(".pkl") else url + ".pkl"
    try:
        with open(url0, "rb") as f:
            raw = f.read()
        if iszip:
            raw = zlib.decompress(raw)
        return pickle.loads(raw)
    except (FileNotFoundError, PermissionError):
        print(f"文件访问失败 {url0}" if os.path.exists(url0) else f"文件不存在 {url0}")
    except zlib.error as e:
        print(f"解压失败 {e}", url0)
    except pickle.UnpicklingError:
        print("反序列化失败", url0)
    return None
def save_multi_version(data,url,timekey=None,freq="%Y%m%d",hold_version=3):
    """Save *data* under a time-stamped filename and prune old versions.

    The file is written as "<url>.<timestamp>.pkl" where the timestamp is
    *timekey* (now() when omitted) formatted with *freq*.  Afterwards older
    versions of the same base name in the directory are deleted until at
    most *hold_version* files remain.

    NOTE(review): version matching assumes the base name contains no "."
    itself; a dotted base name would never match its own versions — confirm.
    NOTE(review): when *url* has no directory part, os.listdir("") below
    will raise — callers appear to pass full paths; verify.
    """
    if timekey is None:
        timekey=datetime.datetime.now()
    else:
        timekey=str2datetime(timekey)
    tk=timekey.strftime(freq)
    if url[-4:]==".pkl":
        url0=url.replace(".pkl","."+tk+".pkl")
    else:
        url0=url+"."+tk+".pkl"
    name=os.path.basename(url)
    name=name.replace(".pkl","")
    save(data,url0)
    url1=os.path.dirname(url0)
    l=os.listdir(url1)
    ll=[]
    # collect sibling files "<name>.<timestamp>.pkl" of the same base name
    for i in l:
        if ".pkl" in i:
            ii=i.replace(".pkl","")
            if "." in ii:
                name0=ii.split(".")[0]
                if name0==name:
                    ll.append(i)
    # lexicographic sort is chronological because freq yields fixed-width stamps
    ll.sort()
    while len(ll)>hold_version:
        i=ll.pop(0)  # oldest version first
        url2=os.path.join(url1,i)
        os.remove(url2)
def load_last_version(url,timekey=None,freq="%Y%m%d"):
    """Load the newest (or a specific) version written by save_multi_version.

    When *timekey* is given and that exact version exists it is returned
    directly; otherwise the lexicographically last matching version in the
    directory is loaded.

    :return: (timestamp, data) tuple, or (None, None) when nothing matches
    """
    if timekey is not None:
        timekey=str2datetime(timekey)
        tk = timekey.strftime(freq)
        if url[-4:] == ".pkl":
            url0 = url.replace(".pkl", "." + tk + ".pkl")
        else:
            url0 = url + "." + tk + ".pkl"
        if os.path.exists(url0):
            data=load(url0)
            return timekey,data
    url1=os.path.dirname(url)
    name = os.path.basename(url)
    name = name.replace(".pkl", "")
    l=os.listdir(url1)
    ll=[]
    # collect sibling files "<name>.<timestamp>.pkl" of the same base name
    for i in l:
        if ".pkl" in i:
            ii=i.replace(".pkl","")
            if "." in ii:
                name0 = ii.split(".")[0]
                if name0 == name:
                    ll.append(i)
    if ll:
        # lexicographic order is chronological: freq yields fixed-width stamps
        ll.sort()
        i=ll[-1]
        tk=i.split(".")[1]
        tk=str2datetime(tk)
        data=load(os.path.join(url1,i))
        return tk,data
    return None,None

def load_sysdata(name):
    """Load the object stored under *name* in the shared system data directory."""
    return load(os.path.join(data_url, name))
def save_sysdata(data, name):
    """Persist *data* under *name* inside the shared system data directory."""
    target = os.path.join(data_url, name)
    return save(data, target)
def openweb(url):
    """Open *url* in the default browser (falls back to ``open`` on macOS)."""
    import webbrowser, platform, shlex
    if platform.system() in ("Windows", "Linux"):
        webbrowser.open(url, new=0, autoraise=True)
    else:
        # Quote the URL so spaces / shell metacharacters cannot break
        # (or inject into) the shell command.
        os.system("open " + shlex.quote(url))
import copy
def deepcopydict(a):
    """Return a fully independent deep copy of *a*."""
    duplicate = copy.deepcopy(a)
    return duplicate
def getconfig(name):
    """Read a config file of paths: one path per line, '#' lines are comments.

    Blank lines and lines whose first character is '#' are skipped; every
    other line is returned as an absolute path.

    Bug fix: the old ``len(i)!=1`` blank-line test also dropped a valid
    one-character final line that lacked a trailing newline.

    :param name: path of the config file
    :return: list of absolute paths
    """
    paths = []
    with open(name, "r") as f:
        for line in f:
            entry = line.rstrip("\n")
            # skip blank lines and full-line comments
            if entry and entry[0] != "#":
                paths.append(os.path.abspath(entry))
    return paths

def load_localdata(name, hold_days=None):
    """Load a cached object from the package-local 'localdata' folder.

    Returns None when the file is absent or, if *hold_days* is given,
    when it is older than that many days.
    """
    path = os.path.join(os.path.dirname(__file__), "localdata", name + ".pkl")
    if not os.path.exists(path):
        return None
    if hold_days:
        created = get_createtimekey(path)
        age_days = (datetime.datetime.now() - created).days
        if age_days > hold_days:
            return None
    return load(path)
def save_localdata(name, data):
    """Save *data* into the package-local 'localdata' folder under *name*."""
    base_url = os.path.join(os.path.dirname(__file__), "localdata")
    if not os.path.exists(base_url):
        os.mkdir(base_url)
    save(data, os.path.join(base_url, name))
def istradeday(date):
    """Return the trading-calendar flag stored for *date* (False when absent).

    Prints a notice and returns False when the calendar file cannot be loaded.
    """
    # Build the path with os.path.join — the old r"..\..\..." literal only
    # resolved correctly on Windows.
    calendar_url = os.path.join(os.path.dirname(__file__), "..", "..", "data", "tradeset_calender")
    a = load(calendar_url)
    if a:
        # same result as the old `date in a.keys()` check followed by a[date]
        return a.get(date, False)
    print("没有日历文件")
    return False
def parse_output(output):
    """Extract the PID column (2nd whitespace field) from ``tasklist`` output."""
    rows = output.strip().split("\n")
    # the first two rows are the column headers and the separator line
    if len(rows) <= 2:
        return []
    return [row.split()[1] for row in rows[2:]]
def list_not_response(process_name):
    """List PIDs of *process_name* instances marked 'Not Responding'."""
    return list_process(process_name, not_respond=True)
def list_process(process_name, not_respond=False):
    """Return PIDs of running *process_name* via Windows ``tasklist``.

    :param process_name: image name to filter on (e.g. "notepad.exe")
    :param not_respond: restrict to processes in 'Not Responding' state
    :return: list of PID strings
    """
    cmd = 'tasklist /FI "IMAGENAME eq %s"'
    if not_respond:
        cmd = cmd + ' /FI "STATUS eq Not Responding"'
    # close the pipe deterministically instead of leaking the handle
    with os.popen(cmd % process_name) as pipe:
        output = pipe.read()
    return parse_output(output)
def __getmodel(rst,m,modelname,depth,model_type):
    """Collect functions/classes exposed by module *m* into *rst* (in place).

    :param rst: dict mutated in place; maps attribute name -> metadata dict
        with keys "type", "id", "model", "location"
    :param m: module object whose public attributes are inspected
    :param modelname: substring filter applied to the defining module's
        __name__ ("" accepts everything)
    :param depth: remaining budget for following imported sub-modules
    :param model_type: None for both kinds, "function" or "class" to restrict
    """
    for k ,v in m.__dict__.items():
        if k[0]!="_":  # skip private / dunder attributes
            if str(type(v))=="<class 'module'>":
                if depth>=0:
                    # NOTE(review): depth is decremented inside the loop, so
                    # later sibling modules see a smaller budget — confirm
                    # whether that is intentional.
                    depth=depth-1
                    __getmodel(rst,v,modelname,depth,model_type)
            else:
                t={}
                check=str(type(v))
                if check == "<class 'function'>":
                    if model_type:
                        if model_type!="function":
                            continue
                    t["type"] = "function"
                elif check == "<class 'type'>":
                    if model_type:
                        if model_type!="class":
                            continue
                    t["type"] = "class"
                if t:
                    name=m.__name__
                    t["id"] = k
                    t["model"] = v
                    t["location"] = name
                    if modelname=="":
                        rst.update({k:t})
                    else:
                        # only keep objects whose module path contains modelname
                        if name.find(modelname)>=0:
                            rst.update({k:t})
def getmodels(modelname,path_depth=2,model_type=None,included__init__=True):
    """Import *modelname* and enumerate the functions/classes it exposes.

    :param modelname: dotted import path of the module or package to scan
    :param path_depth: how many directory levels of sub-packages to recurse into
    :param model_type: None for everything, "class" or "function" to restrict
    :param included__init__: also scan a package's own __init__.py contents
    :return: dict mapping attribute name -> metadata dict (see __getmodel)
    """
    rst={}
    firstlist=modelname.split('.')
    if len(firstlist)>1:
        # fromlist must be non-empty so __import__ returns the leaf module
        m=__import__(modelname,globals(), locals(), firstlist,level=0)
    else:
        m = __import__(modelname)
    print("debug","m.__file__",m.__file__)
    # scan the imported module file itself
    if m  is None:
        return rst
    if m.__file__  is None:
        return rst
    if "__init__.py" in m.__file__:
        if included__init__:
            __getmodel(rst,m,modelname,0,model_type=model_type)
    else:
        __getmodel(rst, m, modelname, 0, model_type=model_type)
    if path_depth>1:# recurse into the package directory
        fpath=os.path.dirname(m.__file__)
        for i in os.listdir(fpath):
            if i[0]!="_":
                if i[-3:]==".py":# plain sub-module file
                    name=i.split('.')[0]
                    modelname1=modelname+"."+name
                    rst1=getmodels(modelname1,path_depth=0,model_type=model_type)
                    rst.update(rst1)
                elif os.path.exists(os.path.join(fpath,i)) and os.path.isdir(os.path.join(fpath,i)):# sub-package directory
                    path_depth1=path_depth-1
                    if path_depth1>=0:
                        modelname1=modelname+"."+i
                        rst1 = getmodels(modelname1, path_depth=path_depth1, model_type=model_type)
                        rst.update(rst1)
    elif path_depth==1:# scan only one level of sub-module files
        fpath=os.path.dirname(m.__file__)
        for i in os.listdir(fpath):
            if i[0]!="_":
                if i[-3:]==".py":# plain sub-module file
                    name=i.split('.')[0]
                    modelname1=modelname+"."+name
                    rst1=getmodels(modelname1,path_depth=0,model_type=model_type)
                    rst.update(rst1)

    return rst
def get_models(modelname, path_depth=2, model_type=None, included__init__=True):
    """Like getmodels() but returns {name: object} instead of metadata dicts."""
    found = getmodels(modelname=modelname, path_depth=path_depth,
                      model_type=model_type, included__init__=included__init__)
    return {key: meta["model"] for key, meta in found.items()}
if __name__ == '__main__':
    # Ad-hoc smoke test: enumerate classes under the 'researchss.ss' package.
    a = get_models(modelname="researchss.ss",path_depth=3,model_type="class",included__init__=False)
    print(a)