import pickle
import json
import sys

import numpy as np
import pandas as pd
import torch
from pandas import DataFrame
import os
from pathlib import Path
# from list_rel.list_deal import list_len1_red_dim
def judge_tpye(data, sup):
    """Return True when ``data``'s exact type is one of the types in ``sup``.

    data: object to check.
    sup: list of accepted types, e.g. ``[list, tuple, dict]``.

    Deliberately uses ``type(...) in sup`` instead of ``isinstance``:
    subclass instances are NOT treated as matches (inheritance is ignored).
    """
    matched = type(data) in sup
    if not matched:
        print("error不在支持的数据类型是：", sup)
    return matched

def load_txtfile(open_name, encoding=None):
    """Read a text file and return its lines as a list of strings.

    open_name: path of the file to read.
    encoding: optional text encoding passed to ``open``; ``None`` keeps the
        platform default, matching the previous behavior.

    Returns the list produced by ``readlines()`` (lines keep their newline).

    Example::

        txt = load_txtfile(open_name)
    """
    # Context manager guarantees the handle is closed even if reading raises;
    # the original opened/closed manually and leaked the handle on error.
    with open(open_name, encoding=encoding) as f:
        return f.readlines()


def load_data_from_pkl(file_path):
    """Deserialize and return the first pickled object stored in *file_path*."""
    with open(file_path, "rb") as handle:
        return pickle.load(handle)


def loadpick(filename):
    """Load every pickled object stored back-to-back in ``filename``.

    filename: path to a pickle file that may contain several consecutive
        ``pickle.dump`` records, e.g. './data/pickle_data/adj_mat.pkl'.

    Returns a list with one entry per pickled record; reading stops at the
    first ``EOFError`` (end of file).

    NOTE(review): ``pickle.load`` can execute arbitrary code — only use this
    on trusted files.
    """
    data = []
    with open(filename, "rb") as f:
        while True:
            try:
                data.append(pickle.load(f))
            except EOFError:
                break
    # Bug fix: the message previously printed a literal placeholder instead
    # of the actual file name.
    print(f"打开文件：{filename}  ，len={len(data)},type={type(data)}")
    return data


def load_pickle(pickle_file):
    """Deserialize a single object from ``pickle_file``.

    Retries with latin1 decoding when the first attempt hits a
    ``UnicodeDecodeError`` (typical for pickles written by Python 2).
    Any other failure is logged and re-raised.
    """
    try:
        with open(pickle_file, 'rb') as f:
            pickle_data = pickle.load(f)
    except UnicodeDecodeError:
        # Python-2 era pickle: latin1 maps every byte 1:1, so the retry
        # cannot raise the same decoding error again.
        with open(pickle_file, 'rb') as f:
            pickle_data = pickle.load(f, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', pickle_file, ':', e)
        raise
    return pickle_data

def _save2pkl(data, save_path):
    """Pickle ``data`` and write it to ``save_path`` (binary mode)."""
    with open(save_path, "wb") as out:
        pickle.dump(data, out)

def save2pickfile(file_name, data):
    """Serialize a Python object into a pickle file.

    Example::

        save2pickfile('adj.pkl', data)
    """
    # Binary mode ('wb') is required by pickle — text mode would raise
    # "write() argument must be str".
    with open(file_name, "wb") as handle:
        pickle.dump(data, handle)


def txt2pick(open_name, save_name):
    """Convert a txt file into a pickle file (txt -> pickle).

    open_name: source text file, read as utf-8 lines.
    save_name: destination pickle file.

    Example::

        open_name = 'P1.txt'
        save_name = 'P1.pickle'
        txt2pick(open_name, save_name)
    """
    # Context manager fixes the original leak: the source handle was never
    # closed when the type check below failed.
    with open(open_name, encoding='utf-8') as f:
        data = f.readlines()
    # Only list/tuple/dict payloads are written; judge_tpye prints its own
    # error message and returns False for anything else.
    sup = [list, tuple, dict]
    if judge_tpye(list(data), sup):
        with open(save_name, 'wb') as out:
            pickle.dump(data, out)
        print(open_name, "成功写入到", save_name, "--写入数据类型为：", type(data))
    return


def save_txtfile(data, dirname, filename):
    """Write ``str(data)`` to ``dirname/filename`` as a utf-8 text file.

    data: any Python object; its ``str()`` representation is written.
    dirname: directory relative to THIS source file's directory (an absolute
        path also works, since ``os.path.join`` discards earlier components).
    filename: name of the output file.

    Side effect: globally raises numpy print thresholds and pandas display
    limits so large arrays/DataFrames are written in full, without "..."
    truncation.

    Example::

        save_txtfile('abc', 'data/pickle_data', 'adj_mat.txt')
    """
    # Lift numpy's print truncation so big arrays are not elided with "...".
    np.set_printoptions(threshold=sys.maxsize)
    # Resolve relative to the directory containing this file (not the CWD).
    # (The original comment claimed "parent of parent"; .parent is simply
    # this file's own directory.)
    base_dir = Path(__file__).parent.absolute()
    out_path = os.path.join(base_dir, dirname, filename)
    # Same truncation problem for DataFrames: str(DataFrame) elides rows and
    # columns by default, so lift pandas' display limits too.
    pd.set_option('display.width', 1000)   # character display width
    pd.set_option('display.max_rows', None)     # show all rows
    pd.set_option('display.max_columns', None)  # show all columns

    with open(out_path, 'w', encoding='utf-8') as f:
        f.write(str(data))
        print(f"保存文件：{out_path}")
    return


def pick2txt(open_name, dirname, save_name):
    """Dump the contents of a pickle (or torch-saved) file into a txt file.

    open_name: source pickle file path,
        e.g. './data/pickle_data/adj_mat.pkl'.
    dirname: output directory (relative to this source file's directory).
    save_name: output txt file name.

    Returns the loaded data.
    """
    try:
        data = loadpick(open_name)
    except Exception:
        # Bug fix: was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit. Fallback handles torch-saved models.
        data = torch.load(open_name)
    save_txtfile(data, dirname, save_name)
    return data
if __name__ == "__main__":
    # Ad-hoc conversion; guarded so that merely importing this module no
    # longer performs file I/O (and no longer crashes when the pkl is absent).
    pick2txt("data/pickle_data/vocab.pkl", "data/pickle_data", "vocab.txt")

def pkl_mod_test(open_name, dir_name, save_name):
    """Patch one recorded parameter of a torch-saved model and re-save it.

    Flow: ``<script dir>/<dir_name>/<open_name>`` (torch.load-style
    checkpoint) -> object -> ``<script dir>/<dir_name>/<save_name>``.

    open_name: input model file name, e.g. 'cora.pkl'.
    dir_name: directory segment appended to ``sys.path[0]`` (may start with
        '/', so plain string concatenation is kept on purpose).
    save_name: output model file name, e.g. 'cora_dist.pkl'.
    """
    # 1. torch checkpoint -> object.
    file_path = sys.path[0] + '/' + dir_name + '/' + open_name
    print(file_path)
    cvae_model = torch.load(file_path)
    # NOTE(review): only the recorded in_features metadata is patched — the
    # weight tensor itself is untouched, so a matmul shape mismatch may
    # persist downstream; confirm whether weights must be resized too.
    cvae_model.__dict__['_modules']['decoder'].MLP[0].in_features = 520

    # 2. Save the patched model. ``with`` fixes the original leak (the file
    #    handle was never closed). torch.save is required here —
    #    pickle.dump produces a file torch.load rejects
    #    ("Invalid magic number; corrupt file?").
    save_path = sys.path[0] + '/' + dir_name + '/' + save_name
    with open(save_path, 'wb') as f:
        torch.save(cvae_model, f)
# open_name='cora.pkl'#不带后缀
# dir_name='/data/pickle_data/dataset_dist_pkl'
# pkl_mod_test(open_name,dir_name,'cora_dist.pkl')

def pkl2obj(open_name):
    """Load and display the contents of a torch-saved pickle file.

    open_name: path relative to ``sys.path[0]`` (the script directory),
        e.g. 'data/pickle_data/dataset_dist_pkl/cora_dist.pkl'.

    Prints the loaded object and its ``__dict__``, then returns it.
    """
    script_dir = sys.path[0]  # directory the running script lives in
    full_path = script_dir + '/' + open_name
    loaded = torch.load(full_path)
    print(loaded)
    print('-' * 40)
    print(loaded.__dict__)
    return loaded
# pkl2obj('data/pickle_data/dataset_dist_pkl/cora.pkl')




