#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Project: spd-sxmcc
"""
@author: lyndon
@time Created on 2018/12/10 10:30
@desc
"""
import jieba
from os import path, sep  # used to build file paths
from analyzer.conf.my_log_settings import logger
# word cloud
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
# word-cloud generation tool
from wordcloud import WordCloud, ImageColorGenerator
# needed for Chinese font handling
import matplotlib.font_manager as fm
from analyzer.conf.settings import imgdir, ttffile


# print(path.abspath(__file__))
# print(path.dirname(__file__))

# config_dir = "data"


class JiebaCloud:
    """Build a word-cloud PNG from Chinese text using jieba segmentation.

    The cloud is shaped by a background image located in *imgdir*; the
    stopword list is read from ``imgdir/alice.txt`` (one word per line).
    """

    def __init__(self, bgjpg, imgdir):
        """
        :param bgjpg: file name of the background image inside *imgdir*.
        :param imgdir: directory holding the background image and the
                       stopword list ``alice.txt``.
        """
        self.imgdir = imgdir
        self.bgjpg = imgdir + sep + bgjpg
        # Background image as an array; used as the word-cloud mask.
        self.bg = np.array(Image.open(self.bgjpg))
        # Directory of this module (kept for resolving relative paths).
        self.d = path.dirname(__file__)
        # Stopword list path, one word per line.
        self.stopwords_path = imgdir + sep + 'alice.txt'
        # Register custom vocabulary so jieba does not split it apart.
        jieba.add_word("侯亮平")
        # TrueType font with Chinese glyph coverage, used when rendering.
        self.ttf = ttffile

    def jiebaclearText(self, text_list):
        """Segment the texts and drop stopwords.

        :param text_list: iterable of dicts, each carrying the raw text
                          under the 'content' key (schema assumed from
                          the caller in __main__ — confirm).
        :return: single space-joined string of the kept words.
        """
        # Read the stopword list once, outside the loop — it is invariant.
        # NOTE(review): the original assumed the platform default encoding;
        # utf-8 is used explicitly here since the content is Chinese.
        with open(self.stopwords_path, 'r', encoding='utf-8') as f_stop:
            stopwords = set(f_stop.read().split("\n"))

        mywordList = []
        for text in text_list:
            text = text['content']
            # Precise-mode segmentation (cut_all=False).
            for myword in jieba.cut(text, cut_all=False):
                word = myword.strip()
                # BUG FIX: the original tested `myword.split() in stop_list`,
                # i.e. a *list* against a list of strings — always False —
                # so stopwords were never filtered. Compare the word itself.
                if word not in stopwords and len(word) > 1:
                    mywordList.append(myword)
        return ' '.join(mywordList)

    def gen_img(self, filefullname, text):
        """Render *text* as a word cloud and save it as a PNG.

        :param filefullname: output path for the image file.
        :param text: space-separated words (output of jiebaclearText).
        :return: *filefullname*, unchanged.
        """
        wc = WordCloud(
            background_color="white",
            max_words=150,
            mask=self.bg,        # shape the cloud like the background image
            max_font_size=60,
            random_state=42,     # deterministic layout across runs
            font_path=self.ttf   # font that can render Chinese glyphs
        ).generate(text)
        # Draw the cloud and the (grayscale) background without axes.
        # The figure is never shown interactively; the image is persisted
        # via wc.to_file below.
        plt.imshow(wc, interpolation="bilinear")
        plt.axis("off")
        plt.imshow(self.bg, cmap=plt.cm.gray)
        # Save the rendered cloud to disk.
        wc.to_file(filefullname)
        return filefullname


if __name__ == '__main__':
    # Demo run: build a word cloud for one phone number by looking up its
    # article ids in the search index, fetching each article body, then
    # segmenting and rendering the combined text.
    from analyzer.conf.settings import est

    phone_no = '13934578599'
    imgPathCfg = imgdir + sep + phone_no + '.png'

    # Match up to 200 records whose msisdn equals the phone number.
    body = {
        "size": 200,
        "query": {
            "match_phrase": {
                "msisdn": phone_no
            }
        }
    }
    phone_columns = ['article_id']
    content_columns = ['content']

    # Resolve the phone number to its list of article ids.
    article_ids = est.getattr_bybodyattr(index='tb_phone_url', doc_type='_doc', body=body, columns=phone_columns)

    # Fetch the body of every referenced article.
    content_list = [
        est.getattr_byattr('tb_library', '_doc', record['article_id'], content_columns)
        for record in article_ids
    ]

    jiebacld = JiebaCloud("man.jpg", imgdir)
    text1 = jiebacld.jiebaclearText(content_list)
    print('=====================================')
    print(text1)
    # imgPathCfg = jiebacld.gen_img(imgPathCfg, text1)