import requests
import json
import re
from tqdm import tqdm
from elasticsearch import Elasticsearch
import logging
# Root logger config: timestamp, source file/line, level, then the message.
logging.basicConfig(level=logging.INFO,format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Module-level logger (not used in the visible code — presumably kept for later use).
logger = logging.getLogger(__name__)


# Elasticsearch endpoint that hosts the news corpus.
ES_URL =  "http://192.168.1.43:9200"
# Index containing crawled news documents.
INDEX = "dm_doc_news_info"

# Entity-extraction HTTP service endpoint (unused in this chunk — TODO confirm it is needed).
ee_link = "http://192.168.1.36:8425/entity_extract"

# ES query body: the 100 newest documents from source "半导体行业观察"
# that carry both a `content` and an `entities` field, newest first.
query = {
    "query": {
        "bool": {
            "must": [
                {"exists": {"field": "content"}},
                {"match": {"source": "半导体行业观察"}},
                {"exists": {"field": "entities"}},
            ]
        }
    },
    "size": 100,
    "sort": [{"publish_time": {"order": "desc"}}],
}


# Connect and fetch one page of matching documents (up to `size` hits).
es = Elasticsearch(ES_URL)
page = es.search(index=INDEX, body=query)

# Split each document's content on Chinese sentence-final punctuation,
# newlines, and curly quotes (lookbehind keeps the delimiter attached),
# collecting unique sentences of 10-200 characters, capped at ~2000.
# Regex is compiled once, outside the loop.
sent_splitter = re.compile(r"(?<=[。！？\n“”])")

sents = set()
for doc in page['hits']['hits']:
    text = doc['_source']['content']
    for sent in sent_splitter.split(text):
        sent = sent.strip()
        if 10 <= len(sent) <= 200:
            sents.add(sent)
    if len(sents) > 2000:  # enough material collected — stop paging through hits
        break

# Write the collected sentences one per line; `with` guarantees the file
# is flushed and closed even if a write fails (original leaked the handle
# on error).
with open('raw04.txt', 'w', encoding='utf-8') as raw_file:
    raw_file.writelines(sent + '\n' for sent in sents)