#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'hello'
__mtime__ = '2019-01-16'
# code is far away from bugs with the god animal protecting
    I love animals. They taste delicious.
              ┏┓      ┏┓
            ┏┛┻━━━┛┻┓
            ┃      ☃      ┃
            ┃  ┳┛  ┗┳  ┃
            ┃      ┻      ┃
            ┗━┓      ┏━┛
                ┃      ┗━━━┓
                ┃  神兽保佑    ┣┓
                ┃　永无BUG！   ┏┛
                ┗┓┓┏━┳┓┏┛
                  ┃┫┫  ┃┫┫
                  ┗┻┛  ┗┻┛



                  包含违禁词：一天


总共耗时：   7733.64874291s
当前链接：   http://www.foodszs.com/news/119457.html

当前ID：   119457
第  24157   个链接
总链接数：   32070
关键词：   {"keywords": ["\u4e00\u5929"], "link": "http://www.foodszs.com/news/119457.html\r\n", "id": ["119457", "html\r\n"]}
None
None
总共耗时：   7734.05896187s
当前链接：   http://www.foodszs.com/news/119458.html

当前ID：   119458
第  24158   个链接
总链接数：   32070
关键词：   {}
None
None
总共耗时：   7734.18283391s
当前链接：   http://www.foodszs.com/news/119459.html

当前ID：   119459
第  24159   个链接
总链接数：   32070
关键词：   {}
Killed: 9




"""
import Queue
import pymysql
import urllib
import re
import time
import jieba
import json
import requests
from collections import Counter
import os
import sys
import xlwt
import xlrd
import threading
from flask import Flask
# Python 2 hack: re-expose setdefaultencoding and force UTF-8 so implicit
# str<->unicode conversions of Chinese text don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')

# The banned-word list lives next to this script, one word per line.
curr_dir = os.path.dirname(os.path.abspath(__file__))
filtered_words_txt_path = os.path.join(curr_dir,'sensitive_words.txt')
import chardet  # NOTE(review): imported but never used in the visible code
# counts = {}
keys = []  # banned words found in the article currently being scanned (shared)
data = []  # all links read from aa.txt; len(data) is the reported total
counts=Counter()  # NOTE(review): never written in the visible code

# Flask app exposing the '/' health-check route defined below.
app = Flask(__name__)

def filter_replace(string):
    """Scan *string* for banned words.

    Loads the word list from sensitive_words.txt (one stripped word per
    line) and delegates matching to replace(), which records every hit in
    the shared global `keys` list.
    """
    with open(filtered_words_txt_path) as filtered_words_txt:
        filtered_words = [line.strip() for line in filtered_words_txt]
    # Called for its side effect on `keys`; the original printed the
    # return value, which was always None and only cluttered the log.
    replace(filtered_words, string)


def replace(filtered_words, string):
    """Record which banned words occur in *string*.

    Every word from *filtered_words* found in *string* is appended to the
    shared global `keys` list. Returns the list of matches (the original
    returned None and had an unused `new_string` local); *string* itself
    is never modified despite the function's name.
    """
    matched = [words for words in filtered_words if words in string]
    keys.extend(matched)
    return matched


def getText(url):

    try:
        html=urllib.urlopen(url).read()
        html=unicode(html,'utf-8')

        # results = requests.get(url)

        word=re.findall(ur"[\u4e00-\u9fa5]+",html)
        s=""
        for w in word:
            s+=w


        seg_list=jieba.cut(s,cut_all=False)
        fenci="/ ".join(seg_list)
        # print 'get web-->',s
        # print 'div result-》',fenci


        print filter_replace(s)

        for i in keys:

            print '\033[0;32m包含违禁词：\033[0m\033[0;31m%s\033[0m' % i
            # print i
        # print '\033[0;32m文章链接：\033[0m\033[0;31m%s\033[0m' % url
        global  keys
        return keys
    except:
        f = open("failed.txt", "a")  # 设置文件对象
        global data
        f.writelines(url)  # 直接将文件中按行读到list里，效果与方法2一样
        f.close()  # 关闭文件

def connectDB():
    connect = pymysql.connect(host='219.148.37.182',port=2533,user='tyjzk_sa',password='wsdckyx7.4',database='new_foodszs')  # 建立连接
    if connect:
        print("连接成功!")

        cursor = connect.cursor()  # 创建一个游标对象,python里的sql语句都要通过cursor来执行
        cursor.execute("SELECT TOP 1 * FROM Food_News where title like '%最%';")  # 执行sql语句
        print 'info ====== %s' % cursor.fetchall()
        connect.commit()  # 提交

        cursor.close()  # 关闭游标
        connect.close()  # 关闭连接
    print '连接失败！'

def getdata():
    """Load the link list from aa.txt into the global `data` and feed
    every line (newline included) into the shared work queue `q`."""
    global data
    with open("aa.txt", "r") as f:
        data = f.readlines()

    for line in data:
        q.put(line)



def getlink():
    """Pop the next link off the shared queue, or return None when empty.

    Uses a non-blocking get instead of the original empty()-then-get()
    pattern, which could block forever if another worker drained the
    queue between the check and the get (threaded use was intended).
    """
    try:
        return q.get_nowait()
    except Queue.Empty:
        return None


def check_son():

    wbk = xlwt.Workbook()
    sheet = wbk.add_sheet('sheet 1')
    # sheet.write(0, 1, 'id')  # 第0行第一列写入内容
    # sheet.write(0, 2, '链接')
    # sheet.write(0, 3, '关键词')
    global num
    index = 0
    while True:

        try:
            link = getlink()
            keywords = getText(link)
            global keys
            keys = []



            time2 = time.time()
            time_num = str(time2 - time1)

            linkarr = link.split('/')
            id = linkarr[-1].split('.')

            dic={'keywords':keywords}

            print 'keywords ===== %s ' % keywords[0]

            words_str = ''
            for i in keywords:
                print 'i ===== %s ' % i
                words_str.join(i)

            if len(keywords) > 0:
                dic = {'keywords':keywords}
                f = open("aa.txt", "r")  # 设置文件对象
                global data
                data = f.readlines()  # 直接将文件中按行读到list里，效果与方法2一样
                f.close()  # 关闭文件
                sheet.write(index, 0, '%s' % id[0])  # 第0行第一列写入内容
                sheet.write(index, 1, '%s' % link)
                sheet.write(index, 2, '%s' % json.dumps(dic))
                print 'key_str ======= %s ' % json.dumps(dic)
                index = index + 1
                # sheet.write(index, 2, 'company%s' % index)  # 第0行第er列写入内容
                # sheet.write(index, 3, '%s' % i)
                print
                '\033[1;32mSaving : \033[1;31m %s \033[0m \033[0m' % num
                print
                '\033[1;33mSaved Sum: \033[1;31m %s \033[0m \033[0m' % num
                wbk.save('companyInfo2.xls')
            # print '\033[0;32m文章链接：\033[0m \033[0;31m%s\033[0m' % url
            print('\033[0;32m总共耗时：\033[0m  \033[0;31m %ss \033[0m ' % time_num)
            print('\033[0;32m当前链接：\033[0m  \033[0;31m %s \033[0m ' % link)
            print('\033[0;32m当前ID：\033[0m  \033[0;31m %s \033[0m ' % id[0])
            print('\033[0;32m第 \033[0;31m %s \033[0m \033[0;32m 个链接\033[0m\033[0m   ' % num)
            print('\033[0;32m总链接数：\033[0m  \033[0;31m %s \033[0m ' % len(data))
            print('\033[0;32m关键词：\033[0m  \033[0;31m %s \033[0m ' % json.dumps(dic,ensure_ascii=True, encoding="utf-8"))



            num = num + 1
            # print 'link is ====== %s \ncurnt num is : %d ' % (link, num)
            # time.sleep(1)
        except:
            continue

def get_data():
    data = xlrd.open_workbook('companyInfo1.xls')
    table = data.sheet_by_name('sheet 1')  # 通过名称获取
    cell = table.cell(0, 0).value
    cell1 = table.cell(0, 1).value
    cell2 = table.cell(0, 2).value


    cell3 = cell2.replace('[', '').replace(']', '')
    cell3 = cell3.split(',')
    str_list = []
    for i in range(len(cell3)):
        cell3[i] = cell3[i].replace("'", '')
        str_list.append(cell3[i])
        # case_list_righ = str(case_list).replace('u\'', '\'')
        # print case_list_righ.decode("unicode-escape")
    print str(str_list[0]).decode('UTF-8').encode('GBK')



    # print 'cell ======= %s cell1 ===== %s  cell2 ====== %s' % (cell,cell1,cell3[0].decode('unicode_escape'))

@app.route('/')
def index():
    """Health-check endpoint: confirms the Flask server is up."""
    # Fixed typo in the response body: 'succes' -> 'success'.
    return 'success'

if __name__ == '__main__':
    # app.run() blocks forever, so in the original every line below it
    # was unreachable while the server ran. Serve Flask from a daemon
    # thread instead so the crawler actually executes.
    server = threading.Thread(target=app.run)
    server.setDaemon(True)
    server.start()

    time1 = time.time()  # crawl start; check_son reports elapsed time from it

    q = Queue.Queue()    # shared work queue of article URLs
    num = 0              # running count of processed links

    # (The five unused hard-coded article_url* sample links were removed
    # as dead code.)

    getdata()
    check_son()





