from collections import Counter

import numpy as np
from tqdm import notebook, tqdm
from IPython.core.display import display

from jiquanquan.sample import pfr_opt, match_algorithm
from jiquanquan.sample.pfr_opt import FourteenPfr

# Toy corpus as per-document word-frequency Counters (the classic
# four-sentence sklearn CountVectorizer example).
# BUG FIX: the second document was written as
#   Counter(['this', 'is' 'the' 'second' 'second' 'document'])
# — the missing commas triggered implicit string-literal concatenation,
# collapsing five words into the single token
# 'isthesecondseconddocument'. Commas restored below.
count = [Counter(['this', 'is', 'the', 'first', 'document']),
         Counter(['this', 'is', 'the', 'second', 'second', 'document']),
         Counter(['and', 'the', 'third', 'one']),
         Counter(['is', 'this', 'the', 'first', 'document']),
         ]
print(count)
# print(count['th'])
# print( 'th' in count)
# print( 'this' in count)
# print(sum(count.values()))
# a = ['', 't', '']
# print(a.count(''))
# a.remove('')
# a.remove('')
# print(a)

# print(sum([1 for c in count if 'first' in c]))
#
# import  pandas as pd
# a = {'this': [1 , 2, 3],'is': [11 , 21, 32]}
# if a.get('this'):
#     print(a.get('this'))
# print(pd.DataFrame(a))
# print(pd.DataFrame(a).values)
#
# a = pd.Series(['1', 2, '1', 2, 3])
# print(a.unique())
# a= [1, 2, 3]
# b= []
# print(b)
# b.extend(a)
# b.extend(a)
# print(b)
# print(np.array([1, 2, 3]) * np.array([1, 2, 3]))
# print(np.sqrt(sum(np.array([1, 2, 3])**2)))
# print(np.sqrt(sum(np.array([1, 2, 2])**2)))

# file = 'corpus/doc_one.txt'
# corpus = []
# with open(file) as f:
#     for line in f.readlines():
#         corpus.append(line.rstrip('\n'))
#     print(corpus)

# with open(file) as f:
#     line = f.readline()
#     while line:
#         print(line)
#     line = f.readline()

# import numpy as np
# arr = np.asarray([[1, 2, 3], [2, 3, 4]])
# print([1, 2, 3] * [1, 2, 1])
# print([i  for i in arr])

from sklearn.model_selection import train_test_split

# Random train/test split of the preprocessed 2014 corpus.
# FIX: the original `open(...).readlines()` leaked the file handle;
# a `with` block guarantees it is closed.
corpus_path = '../../docs/corpus/tempData/2014_processed_text.txt'
with open(corpus_path, encoding='utf-8') as corpus_file:
    corpus = corpus_file.readlines()
# random_state pinned so the split is reproducible across runs.
train_data, test_data = train_test_split(
    corpus, test_size=0.2, random_state=10)
# print(train_data)

# dic = set()
#
# for sentence in tqdm(train_data):
#     words = sentence.strip().split(' ')
#     for word in words:
#         if word.strip() != '':
#             if word not in dic:
#                 dic.add(word)
#
# print(len(dic))


# 生成分词测试语句
# test_text = []
# for sentence in tqdm(test_data):
#     # 每行都是一个独立的语句
#     words = sentence.strip().split(' ')
#     test_text.append(''.join(words))
#
# test_text_txt = '../../docs/corpus/tempData/2014_test_text.txt'
# with open(test_text_txt, 'wb') as text:
#     pickle.dump(test_text, text)
#

# print(test_data[0])
# Probe: for the first test sentence, count how many of the hard-coded
# keywords appear in it, then print the (still zero) word total.
# FIX: the accumulator was named `sum`, shadowing the builtin; renamed.
total_words = 0
for sentence in test_data:
    keywords = ['关键', '词', '1', '纠正', '错案']
    # NOTE(review): set(sentence) is a set of single CHARACTERS, so only
    # single-character keywords (e.g. '词', '1') can ever match the
    # multi-character ones — confirm this is the intended comparison.
    matched = set(keywords).intersection(set(sentence))
    print(sentence)
    print(len(matched))
    break  # only inspect the first sentence
    # total_words += len(sentence.strip().split(' '))
print(total_words)
# print(test_data[4])
# print(len(test_data[3].strip().split(' ')))

# Quick check: slicing the first element of a list yields a one-element list.
a = list(range(1, 4))
print(a[0:1])

