'''
sourceUrl 原文导出工具 (original-article export tool, keyed on sourceUrl).

Tkinter GUI: paste one URL per line, then export matching article records
from Elasticsearch to a timestamped CSV (exact match) or XLS (wildcard
match over the last 30 days).
'''

import csv
import time
import xlwt
from tkinter import *
from concurrent.futures import ThreadPoolExecutor
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
from datetime import datetime
from datetime import timedelta


class App(Tk):
    """Tkinter GUI for exporting article records from Elasticsearch.

    Two export modes, one per button:
      * "原文链接导出"      — exact ``sourceUrl`` term match, written to CSV.
      * "竞品导出(近一月)"  — wildcard ``sourceUrl`` match restricted to the
        last 30 days, written to XLS.

    The window is destroyed as soon as an export starts; progress and the
    final file name are reported on stdout (the tool is packaged with
    ``pyinstaller --onefile``, so stdout is the console window).
    """

    def __init__(self, *args, **kwargs):
        super(App, self).__init__(*args, **kwargs)

        # Worker pool: up to 10 concurrent per-URL Elasticsearch queries.
        self.pool = ThreadPoolExecutor(10)
        # SECURITY NOTE(review): production host and credentials are
        # hard-coded here and ship inside the packaged binary — move them
        # to a config file / environment variables before distribution.
        self.elastic = Elasticsearch(hosts=["http://121.40.195.91:1080"], http_auth=("newrank", "cd123456"))

        self.title("原文导出工具 v1.1")
        self.geometry("600x500")

        # Multi-line input area: one source URL per line.
        self.txt = Text(self, height=35, width=90)

        self.btn = Button(text='原文链接导出', fg='blue', command=self._click)
        self.btn2 = Button(text='竞品导出(近一月)', fg='blue', command=self._click2)

        self.txt.pack()
        self.btn.pack(padx=5, pady=10, side=LEFT)
        self.btn2.pack(padx=5, pady=10, side=LEFT)

    def _getInput(self):
        """Return the non-empty, whitespace-stripped lines of the text box."""
        content = self.txt.get(1.0, END)
        return [line.strip() for line in content.split("\n") if line.strip()]

    def _click(self):
        """Export exact-match results for every input URL to a timestamped CSV."""
        lines = self._getInput()

        # Close the window first; from here on, progress goes to stdout.
        self.destroy()

        filename = datetime.now().strftime('%Y%m%d%H%M%S') + ".csv"

        # BUG FIX: write with an explicit encoding. The original relied on
        # the platform default (e.g. GBK on Chinese Windows), which raises
        # UnicodeEncodeError for characters outside that codec. utf-8-sig
        # adds a BOM so Excel detects the encoding correctly.
        with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
            fieldnames = \
                ['id', 'uid', 'publicTime', 'orderNum', 'clicksCount', 'likeCount', 'url', 'sourceUrl']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

            # map() yields per-URL result lists in input order.
            for hits in self.pool.map(self._export, lines):
                if hits:
                    writer.writerows(hits)

        print("")
        print("导出完成! 已保存至 {}".format(filename))
        # Keep the console window open long enough for the user to read
        # the completion message before the pyinstaller exe closes it.
        time.sleep(30)

    def _export(self, url):
        """Fetch up to 1000 documents whose ``sourceUrl`` exactly equals *url*.

        Returns a (possibly empty) list of the documents' ``_source`` dicts.
        """
        resp = self.elastic.search(index="weixin_data_2018", body={
            "_source": ["uid", "url", "clicksCount", "sourceUrl", "id", "publicTime", "orderNum", "likeCount"],
            "size": 1000,
            "query": {
                "bool": {
                    "filter": [
                        {
                            "term": {
                                "sourceUrl": url
                            }
                        }
                    ]
                }
            }
        })

        print("=> 找到文章 {}  source_url={}".format(resp["hits"]["total"], url))

        return [hit['_source'] for hit in resp['hits']['hits']]

    def _click2(self):
        """Export wildcard-match results from the last 30 days to an XLS file.

        Each input line is used as a ``*line*`` wildcard against ``sourceUrl``;
        all matching documents are scrolled with ``helpers.scan`` (no 1000-doc
        cap) and written to one sheet.
        """
        headers = ['id', 'uid', 'title', 'publicTime', 'orderNum', 'clicksCount', 'likeCount', 'url', 'sourceUrl']

        def export(url):
            # Scroll every hit for one wildcard URL; returns _source dicts.
            print("开始查找 => " + url)

            resp = scan(self.elastic, query={
                "_source": headers,
                "size": 500,
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "range": {
                                    "publicTime": {
                                        "gte": (datetime.now() - timedelta(days=30)).strftime('%Y-%m-%d %H:%M:%S'),
                                        "lt": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                                    }
                                }
                            },
                            {
                                "wildcard": {"sourceUrl": "*" + url + "*"}
                            }
                        ]
                    }
                }
            }, index="weixin_data_2018", doc_type="data")

            count, data = 0, []
            for hit in resp:
                count += 1
                data.append(hit["_source"])

                # Progress heartbeat every 500 documents.
                if count >= 500 and count % 500 == 0:
                    print("=> {} 累计找到文章 {}".format(url, count))

            return data

        lines = self._getInput()

        self.destroy()

        filename = "竞品_" + datetime.now().strftime('%Y%m%d%H%M%S') + ".xls"

        if lines:
            workbook = xlwt.Workbook()
            sheet = workbook.add_sheet("sheet1")

            # Write the header row.
            for i, v in enumerate(headers):
                sheet.write(0, i, v)

            row = 1
            for hits in self.pool.map(export, lines):
                if hits:
                    for hit in hits:
                        for i, name in enumerate(headers):
                            # .get(): scrolled docs may lack some fields.
                            sheet.write(row, i, hit.get(name))
                        row += 1

            workbook.save(filename)

        print("")
        print("导出完成! 已保存至 {}".format(filename))
        # Keep the console open so the user can read the message.
        time.sleep(30)


# Launch the GUI only when run as a script: the original called App() and
# the global mainloop() unconditionally at import time, which would open a
# window (and an Elasticsearch connection) as a side effect of importing
# this module.
if __name__ == "__main__":
    app = App()
    app.mainloop()

# pyinstaller.exe --onefile .\e.py
