import _thread
import json
import random

import requests
from torch.utils.data import Dataset
class GPT2MedicalQADataSet_txt_ntrain(Dataset):
    """Streaming dataset over a rotating set of downloaded text files.

    Samples live in files ``train_0.txt .. train_{f_n-1}.txt`` fetched on
    demand from a fastdfs file server into ``../data/``.  At any moment only
    ``nraws`` samples are held in memory; when the in-memory chunk is
    consumed, the next chunk is read from the open file, the next file is
    downloaded when the current one is exhausted, and the previous file is
    truncated in a background thread to reclaim disk space.

    Each line of a data file is the repr of a dict (e.g. containing
    ``input_ids``); ``__len__`` is an *estimate* that assumes every file has
    as many lines as the first one.
    """

    def __init__(self, file_paths, nraws, shuffle, tokenizer, max_len, data_dir, data_set_name, is_overwrite=False):
        """
        Args:
            file_paths: original txt file path(s).  Currently unused — file
                names are derived from the hard-coded '../data/train_<i>.txt'
                scheme instead.
            nraws: number of samples to keep in memory per chunk.
            shuffle: whether to shuffle each in-memory chunk.
            tokenizer: tokenizer used to resolve the special-token ids.
            max_len: maximum sample length.
            data_dir: cache-file directory (currently unused).
            data_set_name: dataset name (currently unused).
            is_overwrite: whether to regenerate cache files (currently unused).
        """
        f_n = 3            # total number of data files
        self.fpi = 0       # index of the file currently being read
        self.f_n = f_n
        self.nraws = nraws # samples read from one file per chunk
        self.tokenizer = tokenizer
        # [Content] / [Title] ids mark article body vs. headline so the
        # model can distinguish them clearly.
        self.content_id = self.tokenizer.convert_tokens_to_ids("[Content]")
        self.title_id = self.tokenizer.convert_tokens_to_ids("[Title]")
        # [Space] stands in for literal spaces in titles: tokenizing would
        # drop real spaces, and no single punctuation mark is a safe
        # substitute, so spaces are replaced by the [Space] token upstream.
        self.space_id = self.tokenizer.convert_tokens_to_ids("[Space]")
        self.max_len = max_len
        self.shuffle = shuffle

    def initial(self):
        """Download the first file and load the first chunk of samples."""
        self.get_fpi(self.fpi)
        self.file_path = '../data/train_0.txt'  # name of the first file
        file_raws = self.get_files_raws(self.file_path)  # lines in one file
        # Estimate the total sample count assuming equally-sized files.
        self.file_raws = file_raws * self.f_n
        print("train  file_raws", self.file_raws)
        self.finput = open(self.file_path, 'r')
        self.samples = list()
        for _ in range(self.nraws):
            data = self.finput.readline()
            if len(data) <= 1:  # blank/empty line: current file is exhausted
                self.fpi = self.fpi + 1
                if self.fpi < self.f_n:  # more files remain — fetch the next
                    self.get_fpi(self.fpi)
                    self.file_path = '../data/train_' + str(self.fpi) + '.txt'
                    self.finput.close()  # release the exhausted file handle
                    self.finput = open(self.file_path, 'r')
                    data = self.finput.readline()
            if len(data) > 1:  # never eval the EOF marker (empty/blank line)
                # NOTE(review): eval on file content is unsafe if the data
                # files are not fully trusted — consider ast.literal_eval.
                data = eval(data)
                if isinstance(data, dict):
                    self.samples.append(data)
                else:
                    print('v————', _, data)
                    break
        # The chunk is now in memory; index is consumed front-to-back.
        self.current_sample_num = len(self.samples)
        self.index = list(range(self.current_sample_num))
        if self.shuffle:
            random.shuffle(self.samples)

    def __len__(self):
        # Estimated total sample count: lines-per-file * file count.
        return self.file_raws

    def __getitem__(self, idx):
        """Return the next sample in stream order.

        The ``idx`` supplied by the DataLoader is ignored: samples are
        consumed sequentially from the in-memory chunk (shuffled per chunk
        when ``self.shuffle``), and a fresh chunk is loaded from disk/network
        whenever the current one runs out.
        """
        idx = self.index[0]
        data = self.samples[idx]
        print(idx, len(self.index), data['input_ids'][-12:-4])
        self.index = self.index[1:]
        self.current_sample_num -= 1
        if self.current_sample_num <= 0:
            # Chunk consumed: refill from the open file, advancing to (and
            # downloading) the next file when this one is exhausted.
            self.samples = list()
            for _ in range(self.nraws):
                data = self.finput.readline()
                # Fix: require fpi + 1 < f_n so we never try to download a
                # file past the last one (the original pre-increment check
                # allowed fetching nonexistent train_{f_n}.txt).
                if len(data) <= 1 and self.fpi + 1 < self.f_n:  # file exhausted
                    self.fpi = self.fpi + 1
                    self.get_fpi(self.fpi)
                    # Truncate the previous file in the background to save disk.
                    _thread.start_new_thread(self.del_fpi_1, (self.fpi - 1,))
                    self.file_path = '../data/train_' + str(self.fpi) + '.txt'
                    print("\n read next file :--", self.fpi, self.file_path, '\n')
                    self.finput.close()  # release the exhausted file handle
                    self.finput = open(self.file_path, 'r')
                    data = self.finput.readline()
                if len(data) > 1:  # never eval the EOF marker (empty/blank line)
                    # NOTE(review): eval on file content — see initial().
                    data = eval(data)
                    if isinstance(data, dict):
                        self.samples.append(data)
                    else:
                        print('v————', _, data)
                        break
            self.current_sample_num = len(self.samples)
            self.index = list(range(self.current_sample_num))
            if self.shuffle:
                random.shuffle(self.samples)
        return data

    def get_fpi(self, fpi):
        """Download data file *fpi* from the file server into ../data/."""
        fname = "train_" + str(fpi) + ".txt"
        url = self.url_r(fpi)
        print(url)
        r = requests.get(url)
        with open("../data/" + fname, "wb") as code:
            code.write(r.content)
        print(url, r.ok)

    def url_r(self, fpi):
        """Resolve the download URL for logical file index *fpi*.

        ``fpi.txt`` holds a 1-based machine index; each machine appears to
        serve a block of t=200 files, so the local index is remapped to a
        global file number before the fastdfs directory listing is walked.
        Returns None if the file cannot be found on the server — callers
        must be prepared for that (TODO confirm this never happens for
        valid indices).
        """
        t = 200
        with open('fpi.txt') as f:
            # NOTE(review): eval of a local config line; int() would be
            # safer if the file always holds a plain integer — confirm.
            iii = eval(f.readline()) - 1
        fpi = fpi + t * iii + 1 + iii
        reltxt = 'train_' + str(fpi) + '.txt'
        ips = ['192.168.3.228', '192.168.3.90', '192.168.3.216', '192.168.3.122', '192.168.3.21', '192.168.3.31']
        url = "http://" + ips[iii] + ":8080/group1/list_dir?dir="
        # Walk the two-level fastdfs directory tree looking for the file.
        r = requests.get(url + "default/20210507/20/")
        b1_dir = json.loads(r.content)['data']
        for bd in b1_dir:
            url_bd = bd['path'] + bd['name']
            r = requests.get(url + url_bd)
            comm_dir = json.loads(r.content)['data']
            r = requests.get(url + url_bd + '/' + comm_dir[0]['name'])
            f_dirs = json.loads(r.content)
            for fd in f_dirs['data']:
                if fd['name'] == reltxt:
                    rul = "http://" + ips[iii] + ":8080/group1/" + fd['path'] + '/' + fd['name']
                    return rul

    def get_fpifrom_n(self, fpi):
        """Same as get_fpi but without the progress prints."""
        fname = "train_" + str(fpi) + ".txt"
        url = self.url_r(fpi)
        r = requests.get(url)
        with open("../data/" + fname, "wb") as code:
            code.write(r.content)

    def del_fpi_1(self, fpi):
        """Truncate data file *fpi* to reclaim disk space."""
        with open("../data/train_" + str(fpi) + ".txt", "w") as code:
            code.write('')

    def get_files_raws(self, file_path):
        """Return the number of lines (= samples) in *file_path*."""
        with open(file_path, 'r') as f:
            return sum(1 for _ in f)
