def load_data(file_path, input_shape=20):
    """Load a tab-separated dataset and build char-level features.

    Reads a TSV file with 'label' and 'text' columns, builds character-level
    vocabulary/label index maps (pickled to the current directory as
    word2idx.pk / idx2word.pk / label2idx.pk / idx2label.pk), and converts
    each text into a fixed-length tensor of character indices.

    Args:
        file_path: path to a UTF-8, tab-separated file with 'label' and
            'text' columns.
        input_shape: target sequence length; longer texts are truncated,
            shorter ones are zero-padded on the right.

    Returns:
        (x, y, idx2label, vocab_size, label_size, idx2word) where
        x is a list of 1-D LongTensors of length input_shape,
        y is an ndarray of shape (n_samples, 1) with integer class ids.
    """
    # Hoisted here from mid-function; kept function-local to preserve the
    # original file-level import set.
    import torch.nn.functional as F

    df = pd.read_csv(file_path, encoding='utf-8', sep='\t')

    # Label set (order of first appearance defines the class ids).
    labels = list(df['label'].unique())

    # Character-level vocabulary. ''.join avoids the quadratic cost of
    # repeated string concatenation; sorted() makes the char->index mapping
    # deterministic across runs (a bare set() iterates in arbitrary order,
    # which would make the pickled maps irreproducible).
    vocabulary = sorted(set(''.join(df['text'])))

    # 1. Index maps. Index 0 is implicitly reserved for padding, so
    # character indices start at 1.
    # word2idx: char -> index, e.g. 'a': 1
    word2idx = {word: i + 1 for i, word in enumerate(vocabulary)}
    with open('word2idx.pk', 'wb') as f:
        pickle.dump(word2idx, f)
    # idx2word: index -> char, e.g. 1: 'a'
    idx2word = {i + 1: word for i, word in enumerate(vocabulary)}
    with open('idx2word.pk', 'wb') as f:
        pickle.dump(idx2word, f)
    # label2idx: label -> class id, e.g. 'politics': 0
    label2idx = {label: i for i, label in enumerate(labels)}
    with open('label2idx.pk', 'wb') as f:
        pickle.dump(label2idx, f)
    # idx2label: class id -> label, e.g. 0: 'politics'
    # (no longer shadows the outer `labels` list)
    idx2label = {i: label for i, label in enumerate(labels)}
    with open('idx2label.pk', 'wb') as f:
        pickle.dump(idx2label, f)

    vocab_size = len(word2idx)   # number of distinct characters
    label_size = len(label2idx)  # number of label classes

    # 2. Map each text to a tensor of char indices, then pad/truncate to
    # exactly input_shape (zero-padding on the right for short sequences).
    x = [torch.tensor([word2idx[word] for word in sent]) for sent in df['text']]
    x = [v[:input_shape]
         if len(v) > input_shape
         else F.pad(v, (0, input_shape - len(v)), "constant", 0)
         for v in x]

    # Integer class ids with shape (n_samples, 1).
    y = np.array([[label2idx[sent]] for sent in df['label']])

    return x, y, idx2label, vocab_size, label_size, idx2word
