# -*- coding: utf-8 -*-
"""
Created on Wed Apr  8 19:03:49 2020

@author: 10617
"""

from flask import Flask, request, session, g, redirect, url_for, abort, \
    render_template, flash
from datetime import datetime
from gensim.models.doc2vec import Doc2Vec, LabeledSentence

import jieba
import wordcloud
import os
import sqlite3
import markdata1
import spider
import re
import time
import requests
import json
import csv
import random
import sys
import gensim
import sklearn
import numpy as np
import identify


# def connect_db():
#     """Connects to the specific database."""
#     rv = sqlite3.connect(app.config['DATABASE'])
#     rv.row_factory = sqlite3.Row
#     return rv

# def get_db():
#     """Opens a new database connection if there is none yet for the
#     current application context.
#     """
#     if not hasattr(g, 'sqlite_db'):
#         g.sqlite_db = connect_db()
#     return g.sqlite_db

# def init_db():
#     with app.app_context():
#         db = get_db()
#         with app.open_resource('schema.sql', mode='r') as f:
#             db.cursor().executescript(f.read())
#         db.commit()   

# def  dataImport(csvpath,dbpath,tablename):
#     reader = csv.DictReader(open(csvpath,"rb"),delimiter=',',quoting=csv.QUOTE_MINIMAL)
#     conn = sqlite3.connect(dbpath)
#     # shz: fix error with non-ASCII input
#     conn.text_factory = str
#     c = conn.cursor()
#     create_query = 'CREATE TABLE '+tablename +' ("cn" TEXT,"en" TEXT,"lat" DOUBLE,"lon" DOUBLE,"points" DOUBLE,"count" INTEGER,"intro" TEXT,"photo" TEXT,"url" TEXT,"content" TEXT)' 
#     c.execute(create_query)
#     for row in reader:
#         print(row)
#         to_db = [row['cn'], row['en'],row['lat'],row['lon'],row['points'],row['count'],row['intro'],row['photo'],row['url'],row['content']]
#         c.execute('INSERT INTO '+tablename+' (cn, en, lat,lon,points,count,intro,photo,url,content) VALUES (?, ?, ?,?, ?, ?,?, ?, ?,?);', to_db)
#     conn.commit()

def wordcloud_():
    """Render a word cloud from the crawled review text.

    Reads ./data/content.txt, segments it with jieba, and writes the
    image to ./static/img/output5-village.png.
    """
    # Configure the renderer; collocations=False suppresses repeated
    # bigrams in the output image.
    w = wordcloud.WordCloud(width=1000,
                            height=700,
                            background_color='white',
                            font_path='msyh.ttc',
                            collocations=False)

    # Context manager guarantees the handle is closed even if
    # segmentation or rendering raises (the original leaked it then).
    with open('./data/content.txt', encoding='utf-8') as f:
        txt = f.read()

    # WordCloud expects space-separated tokens; jieba.lcut returns a list.
    string = " ".join(jieba.lcut(txt))

    # Feed the text to the cloud and export the picture.
    w.generate(string)
    w.to_file('./static/img/output5-village.png')


class CustomFlask(Flask):
    """Flask subclass using (( ... )) as the Jinja variable delimiters.

    The stock {{ ... }} delimiters collide with front-end template
    syntax, so variables are switched to (( ... )); block and comment
    delimiters keep their Flask defaults.
    """
    jinja_options = Flask.jinja_options.copy()
    jinja_options.update({
        'block_start_string': '{%',
        'block_end_string': '%}',
        'variable_start_string': '((',
        'variable_end_string': '))',
        'comment_start_string': '{#',
        'comment_end_string': '#}',
    })

# Application instance using the custom Jinja delimiters.
app = CustomFlask(__name__)

# Start from a clean slate on process start: drop any comment CSV left
# over from a previous run so stale data is never served.
if(os.path.exists("./data/testcsv.csv")):
            os.remove("./data/testcsv.csv")

@app.route('/',methods=['POST','GET'])
def index():
    """Home page.

    POST: crawl the Tmall item URL submitted in the form, rebuild the
    data files (medicineInfo.txt, testcsv.csv, content.txt), then show
    the product info.  GET: show the product info from the previous
    crawl if one exists, otherwise an empty page.
    """

    def _load_medicine():
        """Return the product-info lines written by the last crawl."""
        with open('./data/medicineInfo.txt', 'r', encoding="utf-8-sig") as f:
            return list(f)

    if request.method == 'POST':
        geturl = request.form['geturl']
        # Persist the submitted URL (the file doubles as the URL list).
        with open('./data/URL.txt', 'w') as f:
            f.write(geturl)
        # Drop stale results so this crawl starts clean.
        if os.path.exists("./data/testcsv.csv"):
            os.remove("./data/testcsv.csv")
        with open("./data/URL.txt", "r") as f:
            # One Tmall product URL per line.
            urllist = f.readlines()
        spider.header()
        for fields in urllist:
            itemID = re.search(".*&id=?(.*?)&.*", fields).group(1)
            sellerID = re.search(".*&user_id=?(.*?)&.*", fields).group(1)
            print("当前爬取商品号："+itemID+" 店铺号："+sellerID)
            # Mode "w": each URL overwrites the previous product's info
            # file (original behavior, kept).
            with open("./data/medicineInfo.txt","w",encoding="utf-8-sig") as m_info:
                m_info.write("商品号:"+itemID+"\n")
                m_info.write("店铺号:"+sellerID+"\n")

                # Hard-coded session cookie used to bypass the login
                # wall; may expire at any time.
                THEheaders = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
                "referer": "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.8.12357de7uI1uOu&id=602977570356&skuId=4304210164437&areaId=510100&user_id=3351172141&cat_id=2&is_b=1&rn=d2044f57561b87fd83ad00ce338f0cd1&on_comment=1",
                "Cookie": "lid=%E8%B6%85%E5%A8%81%E8%93%9D%E7%8C%AB%E6%88%91%E6%9C%89%E7%9F%A5%E8%AF%86%E6%88%91%E8%87%AA%E8%B1%AA; enc=XJwcK4DybRwOk4R5bu%2FJtcXN2m4Kb5LngN9e7EDyoQu6UugNceB3rwsTxLri3HUWF6mWjfbvovS8HVZhYUcAHA%3D%3D; cna=IakcF4CQm3gCAXW7+dxgQxGp; sgcookie=E2lPAmrso4SdnP1qLE8a6; t=1f89b53da080fcee11386787b0604be1; tracknick=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; lgc=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; _tb_token_=318b35fe300eb; cookie2=1e4c25b69b94c9fe58758553009376f5; _m_h5_tk=ebb423f10f34ab8a0296615c7bb2662e_1587917103060; _m_h5_tk_enc=f7ee88a93e9cc1354e99a82f68a8055e; x5sec=7b22726174656d616e616765723b32223a223837333430666631343037653137653938396533306232363963666561346464434c61356c765546454d614f6d624f726d39486f3167453d227d; dnk=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; uc1=existShop=false&cookie14=UoTUPcXJy2NZ6g%3D%3D&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=VT5L2FSpccLuJBreK%2BBd&pas=0&cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D; uc3=nk2=0Jw4ZWKL5ncb%2BZbc%2BzeuWaIvSDzGqg%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UU6kVzP5MUeA7w%3D%3D&vt3=F8dBxGRyNbP9gah03fE%3D; _l_g_=Ug%3D%3D; uc4=nk4=0%400hYwK0dP9pa%2BHtt%2BZ%2BnIIJJ3vuOLV1en5URtcwfplD3h&id4=0%40U2xpVauIQpog3DB9k9NdS5VHJyyf; unb=2654376244; cookie1=VW7p1uG6ehuQnGiMx3e1bs9aTbIAwbuKJpX8eqzeCcI%3D; login=true; cookie17=UU6kVzP5MUeA7w%3D%3D; _nk_=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; sg=%E8%B1%AA42; csg=17e13678; l=eBSn_cgIQpsWV4I3BOfaFurza77OSIRYYuPzaNbMiT5P_WCW5_kOWZjSZW8XC31Vh6lyR3yAaP04BeYBqIq0x6aNa6Fy_Ckmn; isg=BD09weAUP_lJfZulrCdScNF0TJk32nEsVtiNIf-CeRTBNl1oxyqB_Avg4OpwrYnk"
                }
                r = requests.get(fields, headers=THEheaders)
                # Scrape the item attribute list from the product page.
                it = re.finditer(r'<li title="&nbsp;.*?">?(.*?)&nbsp;?(.*?)</li>', r.text)
                for match in it:
                    m_info.write(match.group(1)+re.sub('&nbsp','',match.group(2))+"\n")

            for i in range(1, 6):  # first 5 pages of comments
                new_data = spider.taobaoSpider_content(itemId=itemID, sellerId=sellerID, currentPage=i)
                print('爬取第%d页中...'%(i))
                # Fetch once: the original called new_data.main() twice
                # per page, doubling the network requests.
                page_items = new_data.main()
                if page_items != None:
                    for items in page_items:
                        new_data.write_csv(items)
                time.sleep(random.randint(3, 6))  # throttle to avoid being blocked

        # Build the dataset and score it with the doc2vec model,
        # producing ./data/testcsv.csv.
        markdata1.get_datasest()
        Doc2Vec.load("./modle/doc2vec3.model")  # kept for parity with original; result unused
        markdata1.test()

        # Dump the raw comment text for the word-cloud / lexicon steps.
        with open("./data/testcsv.csv", encoding="utf-8-sig") as csvfile, \
                open("./data/content.txt", "w", encoding="utf-8-sig") as txtfile:
            for row in csv.DictReader(csvfile):
                txtfile.write("\""+row.get("CONTENT")+"\""+"\n")

        medicine = _load_medicine()
    elif os.path.exists("./data/testcsv.csv"):
        # A previous crawl exists: reuse its product info.
        medicine = _load_medicine()
    else:
        medicine = []
        print(len(medicine))
    return render_template('index.html',medicine=medicine)



@app.route('/tables',methods=['POST','GET'])
def tables():
    """Comment table page.

    POST: crawl the submitted Tmall URL, rebuild testcsv.csv, then list
    the scored comments.  GET: list the comments from the existing
    testcsv.csv if present, otherwise an empty table.
    """

    def _load_comments():
        """Return all scored comment rows, excluding the CSV header."""
        # Context manager fixes the leaked handle from
        # csv.reader(open(...)) in the original.
        with open('./data/testcsv.csv', encoding="utf-8-sig") as f:
            return list(csv.reader(f))[1:]

    if request.method == 'POST':
        geturl = request.form['geturl']
        # Persist the submitted URL (the file doubles as the URL list).
        with open('./data/URL.txt', 'w') as f:
            f.write(geturl)
        # Drop stale results so this crawl starts clean.
        if os.path.exists("./data/testcsv.csv"):
            os.remove("./data/testcsv.csv")
        with open("./data/URL.txt", "r") as f:
            # One Tmall product URL per line.
            urllist = f.readlines()
        spider.header()
        for fields in urllist:
            itemID = re.search(".*&id=?(.*?)&.*", fields).group(1)
            sellerID = re.search(".*&user_id=?(.*?)&.*", fields).group(1)
            print("当前爬取商品号："+itemID+" 店铺号："+sellerID)
            # Mode "w": each URL overwrites the previous product's info
            # file (original behavior, kept).
            with open("./data/medicineInfo.txt","w",encoding="utf-8-sig") as m_info:
                m_info.write("商品号:"+itemID+"\n")
                m_info.write("店铺号:"+sellerID+"\n")

                # Hard-coded session cookie used to bypass the login
                # wall; may expire at any time.
                THEheaders = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
                "referer": "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.8.12357de7uI1uOu&id=602977570356&skuId=4304210164437&areaId=510100&user_id=3351172141&cat_id=2&is_b=1&rn=d2044f57561b87fd83ad00ce338f0cd1&on_comment=1",
                "Cookie": "lid=%E8%B6%85%E5%A8%81%E8%93%9D%E7%8C%AB%E6%88%91%E6%9C%89%E7%9F%A5%E8%AF%86%E6%88%91%E8%87%AA%E8%B1%AA; enc=XJwcK4DybRwOk4R5bu%2FJtcXN2m4Kb5LngN9e7EDyoQu6UugNceB3rwsTxLri3HUWF6mWjfbvovS8HVZhYUcAHA%3D%3D; cna=IakcF4CQm3gCAXW7+dxgQxGp; sgcookie=E2lPAmrso4SdnP1qLE8a6; t=1f89b53da080fcee11386787b0604be1; tracknick=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; lgc=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; _tb_token_=318b35fe300eb; cookie2=1e4c25b69b94c9fe58758553009376f5; _m_h5_tk=ebb423f10f34ab8a0296615c7bb2662e_1587917103060; _m_h5_tk_enc=f7ee88a93e9cc1354e99a82f68a8055e; x5sec=7b22726174656d616e616765723b32223a223837333430666631343037653137653938396533306232363963666561346464434c61356c765546454d614f6d624f726d39486f3167453d227d; dnk=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; uc1=existShop=false&cookie14=UoTUPcXJy2NZ6g%3D%3D&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=VT5L2FSpccLuJBreK%2BBd&pas=0&cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D; uc3=nk2=0Jw4ZWKL5ncb%2BZbc%2BzeuWaIvSDzGqg%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UU6kVzP5MUeA7w%3D%3D&vt3=F8dBxGRyNbP9gah03fE%3D; _l_g_=Ug%3D%3D; uc4=nk4=0%400hYwK0dP9pa%2BHtt%2BZ%2BnIIJJ3vuOLV1en5URtcwfplD3h&id4=0%40U2xpVauIQpog3DB9k9NdS5VHJyyf; unb=2654376244; cookie1=VW7p1uG6ehuQnGiMx3e1bs9aTbIAwbuKJpX8eqzeCcI%3D; login=true; cookie17=UU6kVzP5MUeA7w%3D%3D; _nk_=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; sg=%E8%B1%AA42; csg=17e13678; l=eBSn_cgIQpsWV4I3BOfaFurza77OSIRYYuPzaNbMiT5P_WCW5_kOWZjSZW8XC31Vh6lyR3yAaP04BeYBqIq0x6aNa6Fy_Ckmn; isg=BD09weAUP_lJfZulrCdScNF0TJk32nEsVtiNIf-CeRTBNl1oxyqB_Avg4OpwrYnk"
                }
                r = requests.get(fields, headers=THEheaders)
                # Scrape the item attribute list from the product page.
                it = re.finditer(r'<li title="&nbsp;.*?">?(.*?)&nbsp;?(.*?)</li>', r.text)
                for match in it:
                    m_info.write(match.group(1)+re.sub('&nbsp','',match.group(2))+"\n")

            for i in range(1, 6):  # first 5 pages of comments
                new_data = spider.taobaoSpider_content(itemId=itemID, sellerId=sellerID, currentPage=i)
                print('爬取第%d页中...'%(i))
                # Fetch once: the original called new_data.main() twice
                # per page, doubling the network requests.
                page_items = new_data.main()
                if page_items != None:
                    for items in page_items:
                        new_data.write_csv(items)
                time.sleep(random.randint(3, 6))  # throttle to avoid being blocked

        # Build the dataset and score it with the doc2vec model,
        # producing ./data/testcsv.csv.
        markdata1.get_datasest()
        Doc2Vec.load("./modle/doc2vec3.model")  # kept for parity with original; result unused
        markdata1.test()

        # Dump the raw comment text for the word-cloud / lexicon steps.
        with open("./data/testcsv.csv", encoding="utf-8-sig") as csvfile, \
                open("./data/content.txt", "w", encoding="utf-8-sig") as txtfile:
            for row in csv.DictReader(csvfile):
                txtfile.write("\""+row.get("CONTENT")+"\""+"\n")

        comments = _load_comments()
    elif os.path.exists("./data/testcsv.csv"):
        # Reuse the results of the previous crawl.
        comments = _load_comments()
    else:
        comments = []
    return render_template('tables.html',comments=comments)

@app.route('/charts',methods=['POST','GET'])
def charts():
    """Charts page: model score distribution, word cloud and
    lexicon-based authenticity estimate.

    POST: crawl the submitted Tmall URL first, then analyse.  GET:
    analyse the existing testcsv.csv if present, otherwise render the
    empty-state page.  The POST and GET analysis paths were ~200
    duplicated lines in the original; they are shared helpers here.
    """

    def _distribution():
        """Bucket comments by model score (column 6): <=0.3 negative,
        >=0.7 positive, else middle.  Returns (percent, amount)."""
        with open('./data/testcsv.csv', encoding="utf-8-sig") as f:
            comments = list(csv.reader(f))[1:]  # drop header row
        positive = negative = middle = 0
        for comment in comments:
            score = float(comment[6])
            if score <= 0.3:
                negative += 1
            elif score >= 0.7:
                positive += 1
            else:
                middle += 1
        num = len(comments)
        if num == 0:
            # Empty CSV: avoid the original's ZeroDivisionError.
            return [0, 0, 0], [0, 0, 0]
        # Unified rounding: the original POST branch used
        # round(x, 4)*100 while the GET branch used round(x*100, 2);
        # the latter is kept for both.
        percent = [round((positive / num) * 100, 2),
                   round((middle / num) * 100, 2),
                   round((negative / num) * 100, 2)]
        print(percent)
        return percent, [positive, middle, negative]

    def _lexicon_scores():
        """Score each line of content.txt with the sentiment lexicon."""
        words_value = identify.words()
        # Hoisted out of the per-line loop: the original re-opened and
        # re-read the stopword file for every comment line.
        with open('情感词典\\Stopwordlist.txt', 'r', encoding='GB2312') as f:
            stopwords = set(f.readlines())
        score_var = []
        with open('./data/content.txt', 'r', encoding='utf-8', errors='ignore') as f:
            for line in f:
                # Segment and drop stopwords (lines keep their '\n').
                newSent = [w for w in jieba.cut(line) if w + '\n' not in stopwords]
                # word -> position map; duplicates keep the last index,
                # matching the original loop.
                datafen_dist = {w: i for i, w in enumerate(newSent)}
                data_1 = identify.classifywords(datafen_dist, words_value[0], words_value[1], words_value[2])
                score_var.append(identify.scoreSent(data_1[0], data_1[1], data_1[2], newSent))
        return score_var

    def _authenticity():
        """Derive the authenticity rate (%) and malicious flag from the
        lexicon scores.  Returns (real, binary)."""
        score_var = _lexicon_scores()
        good = sum(1 for s in score_var if s > 0)
        bad = sum(1 for s in score_var if s < 0)
        normal = len(score_var) - good - bad
        total = good + normal + bad
        print('good_comments:', good, 'normal_comments:', normal, 'bad_comments:', bad, 'Total_comments:',
            total)
        if total == 0:
            # No scored lines: report "unknown" instead of dividing by zero.
            return -1, -1
        # NOTE(review): -0.3 is an empirical calibration offset carried
        # over from the original code -- confirm its intent.
        good_comments_rate = good / total - 0.3
        rate = good / total
        binary = 1 if (rate < 0.5 and good_comments_rate < 0.5) else 0
        print('文本评论真实率：%.2f%%' % (good_comments_rate * 100),'是否恶意差评：',binary)
        return round(good_comments_rate * 100, 2), binary

    def _crawl(geturl):
        """Crawl the product URL, rebuild testcsv.csv and content.txt."""
        with open('./data/URL.txt', 'w') as f:
            f.write(geturl)
        # Drop stale results so this crawl starts clean.
        if os.path.exists("./data/testcsv.csv"):
            os.remove("./data/testcsv.csv")
        with open("./data/URL.txt", "r") as f:
            # One Tmall product URL per line.
            urllist = f.readlines()
        spider.header()
        for fields in urllist:
            itemID = re.search(".*&id=?(.*?)&.*", fields).group(1)
            sellerID = re.search(".*&user_id=?(.*?)&.*", fields).group(1)
            print("当前爬取商品号："+itemID+" 店铺号："+sellerID)
            # Mode "w": each URL overwrites the previous product's info
            # file (original behavior, kept).
            with open("./data/medicineInfo.txt","w",encoding="utf-8-sig") as m_info:
                m_info.write("商品号:"+itemID+"\n")
                m_info.write("店铺号:"+sellerID+"\n")

                # Hard-coded session cookie used to bypass the login
                # wall; may expire at any time.
                THEheaders = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
                "referer": "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.8.12357de7uI1uOu&id=602977570356&skuId=4304210164437&areaId=510100&user_id=3351172141&cat_id=2&is_b=1&rn=d2044f57561b87fd83ad00ce338f0cd1&on_comment=1",
                "Cookie": "lid=%E8%B6%85%E5%A8%81%E8%93%9D%E7%8C%AB%E6%88%91%E6%9C%89%E7%9F%A5%E8%AF%86%E6%88%91%E8%87%AA%E8%B1%AA; enc=XJwcK4DybRwOk4R5bu%2FJtcXN2m4Kb5LngN9e7EDyoQu6UugNceB3rwsTxLri3HUWF6mWjfbvovS8HVZhYUcAHA%3D%3D; cna=IakcF4CQm3gCAXW7+dxgQxGp; sgcookie=E2lPAmrso4SdnP1qLE8a6; t=1f89b53da080fcee11386787b0604be1; tracknick=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; lgc=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; _tb_token_=318b35fe300eb; cookie2=1e4c25b69b94c9fe58758553009376f5; _m_h5_tk=ebb423f10f34ab8a0296615c7bb2662e_1587917103060; _m_h5_tk_enc=f7ee88a93e9cc1354e99a82f68a8055e; x5sec=7b22726174656d616e616765723b32223a223837333430666631343037653137653938396533306232363963666561346464434c61356c765546454d614f6d624f726d39486f3167453d227d; dnk=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; uc1=existShop=false&cookie14=UoTUPcXJy2NZ6g%3D%3D&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=VT5L2FSpccLuJBreK%2BBd&pas=0&cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D; uc3=nk2=0Jw4ZWKL5ncb%2BZbc%2BzeuWaIvSDzGqg%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UU6kVzP5MUeA7w%3D%3D&vt3=F8dBxGRyNbP9gah03fE%3D; _l_g_=Ug%3D%3D; uc4=nk4=0%400hYwK0dP9pa%2BHtt%2BZ%2BnIIJJ3vuOLV1en5URtcwfplD3h&id4=0%40U2xpVauIQpog3DB9k9NdS5VHJyyf; unb=2654376244; cookie1=VW7p1uG6ehuQnGiMx3e1bs9aTbIAwbuKJpX8eqzeCcI%3D; login=true; cookie17=UU6kVzP5MUeA7w%3D%3D; _nk_=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; sg=%E8%B1%AA42; csg=17e13678; l=eBSn_cgIQpsWV4I3BOfaFurza77OSIRYYuPzaNbMiT5P_WCW5_kOWZjSZW8XC31Vh6lyR3yAaP04BeYBqIq0x6aNa6Fy_Ckmn; isg=BD09weAUP_lJfZulrCdScNF0TJk32nEsVtiNIf-CeRTBNl1oxyqB_Avg4OpwrYnk"
                }
                r = requests.get(fields, headers=THEheaders)
                # Scrape the item attribute list from the product page.
                it = re.finditer(r'<li title="&nbsp;.*?">?(.*?)&nbsp;?(.*?)</li>', r.text)
                for match in it:
                    m_info.write(match.group(1)+re.sub('&nbsp','',match.group(2))+"\n")

            for i in range(1, 6):  # first 5 pages of comments
                new_data = spider.taobaoSpider_content(itemId=itemID, sellerId=sellerID, currentPage=i)
                print('爬取第%d页中...'%(i))
                # Fetch once: the original called new_data.main() twice
                # per page, doubling the network requests.
                page_items = new_data.main()
                if page_items != None:
                    for items in page_items:
                        new_data.write_csv(items)
                time.sleep(random.randint(3, 6))  # throttle to avoid being blocked

        # Build the dataset and score it with the doc2vec model,
        # producing ./data/testcsv.csv.
        markdata1.get_datasest()
        Doc2Vec.load("./modle/doc2vec3.model")  # kept for parity with original; result unused
        markdata1.test()

        # Dump the raw comment text for the word-cloud / lexicon steps.
        with open("./data/testcsv.csv", encoding="utf-8-sig") as csvfile, \
                open("./data/content.txt", "w", encoding="utf-8-sig") as txtfile:
            for row in csv.DictReader(csvfile):
                txtfile.write("\""+row.get("CONTENT")+"\""+"\n")

    if request.method == 'POST':
        _crawl(request.form['geturl'])
        percent, amount = _distribution()
        wordcloud_()
        real, binary = _authenticity()
        key = 1
        # real == -1 means "could not be computed".
        possibility = [real, 100 - real] if real >= 0 else [-1, -1]
    elif os.path.exists("./data/testcsv.csv"):
        # Reuse the results of the previous crawl.
        percent, amount = _distribution()
        wordcloud_()
        real, binary = _authenticity()
        key = 1
        possibility = [real, 100 - real] if real >= 0 else [-1, -1]
    else:
        # No data at all: signal the template to show the empty state.
        percent = []
        amount = []
        possibility = [-1, -1]
        key = 0
        binary = -1
    return render_template('charts.html',percent=percent,amount=amount,possibility=possibility,key=key,binary=binary)

@app.route('/store',methods=['POST','GET'])
def store():
    """Store page: lexicon-based authenticity rate and malicious flag.

    POST: crawl the submitted Tmall URL first, then analyse.  GET:
    analyse the existing data if testcsv.csv is present, otherwise
    render the empty state.  The duplicated POST/GET analysis code of
    the original is shared via helpers.
    """

    def _lexicon_scores():
        """Score each line of content.txt with the sentiment lexicon."""
        words_value = identify.words()
        # Hoisted out of the per-line loop: the original re-opened and
        # re-read the stopword file for every comment line.
        with open('情感词典\\Stopwordlist.txt', 'r', encoding='GB2312') as f:
            stopwords = set(f.readlines())
        score_var = []
        with open('./data/content.txt', 'r', encoding='utf-8', errors='ignore') as f:
            for line in f:
                # Segment and drop stopwords (lines keep their '\n').
                newSent = [w for w in jieba.cut(line) if w + '\n' not in stopwords]
                # word -> position map; duplicates keep the last index,
                # matching the original loop.
                datafen_dist = {w: i for i, w in enumerate(newSent)}
                data_1 = identify.classifywords(datafen_dist, words_value[0], words_value[1], words_value[2])
                score_var.append(identify.scoreSent(data_1[0], data_1[1], data_1[2], newSent))
        return score_var

    def _authenticity():
        """Derive the authenticity rate (%) and malicious flag from the
        lexicon scores.  Returns (real, binary)."""
        score_var = _lexicon_scores()
        good = sum(1 for s in score_var if s > 0)
        bad = sum(1 for s in score_var if s < 0)
        normal = len(score_var) - good - bad
        total = good + normal + bad
        print('good_comments:', good, 'normal_comments:', normal, 'bad_comments:', bad, 'Total_comments:',
            total)
        if total == 0:
            # No scored lines: report "unknown" instead of dividing by zero.
            return -1, -1
        # NOTE(review): -0.3+0.13 is an empirical calibration offset
        # carried over verbatim from the original (it differs from the
        # /charts route's -0.3) -- confirm its intent.
        good_comments_rate = good / total - 0.3 + 0.13
        rate = good / total
        binary = 1 if (rate < 0.5 and good_comments_rate < 0.5) else 0
        print('文本评论真实率：%.2f%%' % (good_comments_rate * 100),'是否恶意差评：',binary)
        return round(good_comments_rate * 100, 2), binary

    def _crawl(geturl):
        """Crawl the product URL, rebuild testcsv.csv and content.txt."""
        with open('./data/URL.txt', 'w') as f:
            f.write(geturl)
        # Drop stale results so this crawl starts clean.
        if os.path.exists("./data/testcsv.csv"):
            os.remove("./data/testcsv.csv")
        with open("./data/URL.txt", "r") as f:
            # One Tmall product URL per line.
            urllist = f.readlines()
        spider.header()
        for fields in urllist:
            itemID = re.search(".*&id=?(.*?)&.*", fields).group(1)
            sellerID = re.search(".*&user_id=?(.*?)&.*", fields).group(1)
            print("当前爬取商品号："+itemID+" 店铺号："+sellerID)
            # Mode "w": each URL overwrites the previous product's info
            # file (original behavior, kept).
            with open("./data/medicineInfo.txt","w",encoding="utf-8-sig") as m_info:
                m_info.write("商品号:"+itemID+"\n")
                m_info.write("店铺号:"+sellerID+"\n")

                # Hard-coded session cookie used to bypass the login
                # wall; may expire at any time.
                THEheaders = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
                "referer": "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.8.12357de7uI1uOu&id=602977570356&skuId=4304210164437&areaId=510100&user_id=3351172141&cat_id=2&is_b=1&rn=d2044f57561b87fd83ad00ce338f0cd1&on_comment=1",
                "Cookie": "lid=%E8%B6%85%E5%A8%81%E8%93%9D%E7%8C%AB%E6%88%91%E6%9C%89%E7%9F%A5%E8%AF%86%E6%88%91%E8%87%AA%E8%B1%AA; enc=XJwcK4DybRwOk4R5bu%2FJtcXN2m4Kb5LngN9e7EDyoQu6UugNceB3rwsTxLri3HUWF6mWjfbvovS8HVZhYUcAHA%3D%3D; cna=IakcF4CQm3gCAXW7+dxgQxGp; sgcookie=E2lPAmrso4SdnP1qLE8a6; t=1f89b53da080fcee11386787b0604be1; tracknick=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; lgc=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; _tb_token_=318b35fe300eb; cookie2=1e4c25b69b94c9fe58758553009376f5; _m_h5_tk=ebb423f10f34ab8a0296615c7bb2662e_1587917103060; _m_h5_tk_enc=f7ee88a93e9cc1354e99a82f68a8055e; x5sec=7b22726174656d616e616765723b32223a223837333430666631343037653137653938396533306232363963666561346464434c61356c765546454d614f6d624f726d39486f3167453d227d; dnk=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; uc1=existShop=false&cookie14=UoTUPcXJy2NZ6g%3D%3D&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=VT5L2FSpccLuJBreK%2BBd&pas=0&cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D; uc3=nk2=0Jw4ZWKL5ncb%2BZbc%2BzeuWaIvSDzGqg%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UU6kVzP5MUeA7w%3D%3D&vt3=F8dBxGRyNbP9gah03fE%3D; _l_g_=Ug%3D%3D; uc4=nk4=0%400hYwK0dP9pa%2BHtt%2BZ%2BnIIJJ3vuOLV1en5URtcwfplD3h&id4=0%40U2xpVauIQpog3DB9k9NdS5VHJyyf; unb=2654376244; cookie1=VW7p1uG6ehuQnGiMx3e1bs9aTbIAwbuKJpX8eqzeCcI%3D; login=true; cookie17=UU6kVzP5MUeA7w%3D%3D; _nk_=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; sg=%E8%B1%AA42; csg=17e13678; l=eBSn_cgIQpsWV4I3BOfaFurza77OSIRYYuPzaNbMiT5P_WCW5_kOWZjSZW8XC31Vh6lyR3yAaP04BeYBqIq0x6aNa6Fy_Ckmn; isg=BD09weAUP_lJfZulrCdScNF0TJk32nEsVtiNIf-CeRTBNl1oxyqB_Avg4OpwrYnk"
                }
                r = requests.get(fields, headers=THEheaders)
                # Scrape the item attribute list from the product page.
                it = re.finditer(r'<li title="&nbsp;.*?">?(.*?)&nbsp;?(.*?)</li>', r.text)
                for match in it:
                    m_info.write(match.group(1)+re.sub('&nbsp','',match.group(2))+"\n")

            for i in range(1, 6):  # first 5 pages of comments
                new_data = spider.taobaoSpider_content(itemId=itemID, sellerId=sellerID, currentPage=i)
                print('爬取第%d页中...'%(i))
                # Fetch once: the original called new_data.main() twice
                # per page, doubling the network requests.
                page_items = new_data.main()
                if page_items != None:
                    for items in page_items:
                        new_data.write_csv(items)
                time.sleep(random.randint(3, 6))  # throttle to avoid being blocked

        # Build the dataset and score it with the doc2vec model,
        # producing ./data/testcsv.csv.
        markdata1.get_datasest()
        Doc2Vec.load("./modle/doc2vec3.model")  # kept for parity with original; result unused
        markdata1.test()

        # Dump the raw comment text for the lexicon step.
        with open("./data/testcsv.csv", encoding="utf-8-sig") as csvfile, \
                open("./data/content.txt", "w", encoding="utf-8-sig") as txtfile:
            for row in csv.DictReader(csvfile):
                txtfile.write("\""+row.get("CONTENT")+"\""+"\n")

    if request.method == 'POST':
        _crawl(request.form['geturl'])
        real, binary = _authenticity()
    elif os.path.exists("./data/testcsv.csv"):
        # Reuse the results of the previous crawl.
        real, binary = _authenticity()
    else:
        # No data yet: signal the template to show the empty state.
        binary = -1
        real = -1
    return render_template('store.html',binary=binary,real=real)

@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page for unknown routes."""
    # Return the status code explicitly so Flask does not default to 200.
    body = render_template('404.html')
    return body, 404


    
if __name__ == '__main__':
    # Development server on port 8777; debug=True enables the reloader
    # and interactive debugger -- not suitable for production.
    app.run(debug=True, port=8777)