# coding:utf-8
import re
import pandas as pd
import pickle
import thulac
from bs4 import BeautifulSoup
import requests
import json
#
# with open('./datasets/clean_xingshisusong_law.txt',encoding='utf-8') as f:
#     lines = f.readlines()
#     out = open('./datasets/clean_xingshisusong_law_1.txt','w',encoding='utf-8')
#     patten = re.compile('第.*?条',re.I)
#     for line in lines:
#         if str(re.match(patten,line.split('    ')[0])) != 'None':
#             out.write(line.strip()+'\n')
#             out.flush()
#             print(line)
#     out.close()


# df = pd.read_excel('./datasets/match_result_re.xlsx',sheetname='Sheet2')
# with open('./datasets/clean_xing_law_1.txt') as f:
#     laws = f.readlines()
# patten = re.compile('.*?（.*?第.*?条',re.I)
# result = []
# for i in range(df.shape[0]):
#     r = [df.iloc[i,0],df.iloc[i,1],df.iloc[i,2]]
#     line = df.iloc[i,3]
#     if line == line and str(re.match(patten,line))!='None':
#         r.append(line.strip())
#         indexs = []
#
#         for j in re.finditer(patten,line):
#             num = j.group()[j.group().index('第')+1:j.group().index('条')]
#             for k,law in enumerate(laws):
#                 if str(re.match('第'+str(num)+'条',law.split('    ')[0])) != 'None' and k not in indexs:
#
#                     r.extend([law.split('    ')[0],law.split('    ')[1].strip()])
#                     indexs.append(k)
#                 elif k not in indexs and num.isdigit() and int(num) not in indexs:
#                     r.extend([laws[int(num)-1].split('    ')[0],laws[int(num)-1].split('    ')[1].strip()])
#                     indexs.append(int(num))
#     result.append(r)
# r_df = pd.DataFrame(result)
# r_df.to_excel('./datasets/re.xlsx',index=False,encoding='utf-8')


# xing_laws = open('./datasets/clean_xing_law_1.txt').readlines()
# susong_laws = open('./datasets/clean_xingshisusong_law_1.txt').readlines()
# xing_laws.extend(susong_laws)
# #dim = 3000
# #tfidf = pickle.load(open('./datasets/all_tfidf-'+str(dim)+'.pkl','rb'))
# thu = thulac.thulac(seg_only=True)
# law_tfidf = []
# for i in range(len(xing_laws)):
#     if i<451:
#          law_tfidf.append(thu.cut(xing_laws[i].split('    ')[1],text=True))
#     else:
#          law_tfidf.append(thu.cut(xing_laws[i].split(' ')[1], text=True))
#     print(i)
# pickle.dump([law_tfidf,[],[],[]],open('./datasets/all_laws.pkl','wb'))
# pickle.dump(tfidf.transform(law_tfidf).toarray(),open('./datasets/law_tfidf.pkl','wb'))

# datas = [line.split('\t')[1].strip() for line in open('./datasets/datas.txt').readlines()]
# xing_laws.extend(datas)
# out = open('./datasets/all_datas.txt','w',encoding='utf-8')
# for i in range(len(xing_laws)):
#     if i < 451:
#         out.write('law'+'\t'+xing_laws[i].split('    ')[1].strip()+'\n')
#     elif i<740:
#         out.write('law' + '\t' + xing_laws[i].split(' ')[1].strip() + '\n')
#     else:
#         out.write('law' + '\t' + xing_laws[i].strip() + '\n')
#     out.flush()
#     print(i)
# out.close()

#law = pickle.load(open('./datasets/law_glove_embedding.pkl','rb'))
#
# req = requests.post(url='https://www.chineselaw.com/ftfx/resultList'
#                     ,data={'query': [{"field":"keywords","cfield":"刑事","value":"刑事"}],'filter': [{"field":"效力级别","cfield":"效力级别：司法解释","value":"xg0401"}],
#                            'queryType': '标题','source': 'CHL:xg04','pageNum': 1,
#                            'getDataType': 'get'},headers={'Content-Type':'application/x-www-form-urlencoded','Connection': 'close'})
#
# print(req.content)

def active_titles(result):
    """Return titles of entries still in force.

    Parameters
    ----------
    result : dict
        Parsed JSON object; assumes a ``'list'`` key holding dicts that each
        carry ``'timelinessDic'`` and ``'title'`` keys (matches the structure
        read from ./datasets/sifa.json — TODO confirm against the source data).

    Returns
    -------
    list[str]
        Titles whose ``timelinessDic`` is not '失效' (i.e. not expired).
    """
    return [r['title'] for r in result['list'] if r['timelinessDic'] != '失效']


def main():
    """Load the judicial-interpretation JSON and print each in-force title."""
    with open('./datasets/sifa.json', encoding='utf-8') as f:
        result = json.load(f)
    for title in active_titles(result):
        print(title)


# Guard the file I/O so importing this module has no side effects.
if __name__ == '__main__':
    main()






