# coding:utf-8
import os

# def MakeAllWordsList(train_datasseg):
#     all_word = {}
#     for train_dataset in train_datasseg:
#         for word in train_dataset:
#             if word in all_word:
#                 all_word[word] += 1
#             else:
#                 all_word[word] = 1
#     print("all_words length in all the train datas: ",len(all_word.keys()))
#     all_words_reverse = sorted(all_word.item(),key=lambda f:f[1],reverse=True)
#     for all_word_reverse in all_words_reverse:
#         print(all_word_reverse[0],"\t",all_word_reverse[1])
#     all_word_list = [all_word_reverse[0] for all_word_reverse in all_words_reverse if len(all_word_reverse[0])>1]
#     return all_word_list
#
# if __name__ == '__main__':
#     dir_dict = {"baby": 130, "car": 410, "food": 410, "health": 410, "legend": 410,
#                 "life": 410, "love": 159, "news": 410, "science": 410, "sexual": 39}
#
#     raw_data = "/data/project/python/data/machine_learning"
#     data_file_number = 0
#     for word_data_name,word_data_number in dir_dict.items():
#         while (data_file_number < word_data_number):
#             print("word_data_name: ",word_data_name)
#             print("word_data_number: ",word_data_number)
#             print("data_file_number: ",data_file_number)
#             fd = open(raw_data + "/" + word_data_name + "/" + str(data_file_number) + ".txt","r",encoding="UTF-8")
#             MakeAllWordsList(fd)
#             for line in fd:
#                 print(line + "\n",end="")
#             fd.close()

# Path of the file to dump to stdout.
path = "E:\\八斗\\scala\\11.scala"

try:
    # Open once, via a context manager so the handle is always closed.
    # The original code opened the file twice: a leaked `fd` in a try
    # block (never used, never closed) and then again in a `with` that
    # would crash anyway if the file was missing.
    with open(path, "r", encoding="UTF-8") as fd1:
        for line in fd1:
            # Each line keeps its trailing "\n"; end="" avoids the
            # double spacing that plain print(line) would produce.
            print(line, end="")
except FileNotFoundError:
    # The original message had no {filename} placeholder, so .format()
    # was a no-op and literal placeholder text was printed.
    print(f"No such file or directory: {path}")

