#!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import os
import shutil

from shareAPI2020TB import settings

server_logger = logging.getLogger("server")

import time


# import os,django
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shareAPI2020.settings")# project_name 项目名称
# django.setup()

import json
import jsonpath
import requests

from tbitem import models

'''
------本文件为副文件--------
get_3_tbContent.py ----- 把数据库中的淘宝id取出来，抓评论存储数据库

--------------------------
'''


def initlist(json_str):
    """Parse a Taobao feedRateList JSON payload into comment items.

    Args:
        json_str: JSON text whose top level has a 'comments' list; each
            comment carries 'content' (review text) and 'photos'
            (a list of {'url': ...} dicts, possibly null).

    Returns:
        list[dict]: one dict per acceptable comment with keys
            '评论内容' (review text) and '好评图片' (photo URL list,
            prefixed with 'http:').  Comments containing a blacklisted
            word are skipped entirely.
    """
    # Blacklist: profanity, negative reviews, and the "user left no
    # review" placeholder.  (Original code only printed a warning but
    # still kept the flagged comment -- fixed to actually skip it.)
    dangerWord = ['垃圾', '差评', '破玩意', '不要买', '别买', '差劲',
                  '他妈的', '傻逼', '坏的', '此用户没有填写评价']

    items = []
    py_json = json.loads(json_str)
    for comment in py_json['comments']:
        user_text = comment['content']

        # Skip the whole comment if any blacklisted word appears.
        flagged = next((w for w in dangerWord if w in user_text), None)
        if flagged is not None:
            print('发现不合适内容', flagged)
            continue

        # Photo URLs come back protocol-less; add the scheme so they
        # resolve.  ('photos' may be null, hence the `or []`.)
        imglist = ['http:' + photo['url']
                   for photo in (comment.get('photos') or [])]

        items.append({
            '评论内容': user_text,
            '好评图片': imglist,
        })

    return items


def tbmain(id,page):
    """Fetch one page of Taobao reviews for item *id* and return parsed items.

    Performs an HTTP GET against rate.taobao.com's feedRateList endpoint
    (JSONP), strips the wrapping parentheses, and delegates parsing to
    ``initlist``.

    Args:
        id: Taobao item (auction) numeric id.
        page: 1-based review page number (20 reviews per page).

    Returns:
        list[dict]: the items produced by ``initlist`` for this page.

    NOTE(review): the Cookie and Referer below are hard-coded session
    values captured from a browser -- they presumably expire, so requests
    may start returning empty/denied payloads; verify before relying on
    this in production.
    """
    url = f'https://rate.taobao.com/feedRateList.htm?auctionNumId={id}&userNumId=903511695&currentPageNum={page}&pageSize=20'
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Referer': 'https://item.taobao.com/item.htm?spm=a230r.1.14.31.1a9df9eaZ2TtQ2&id=593014213362&ns=1&abbucket=4',

        'Cookie': 'miid=1034753504511266755;cna=dx1nFLT5mQcCAXWaR2QxwmBW;'

                  'thw=cn;t=325ff4e457909046382b22a33c83eadb;tracknick=acc%5Cu80E1%5Cu4F20;'

                  'lgc=acc%5Cu80E1%5Cu4F20;tg=0;v=0;'

                  'cookie2=128cea00303f12de470490c850cc00b1;_tb_token_=fdb4b335af83e;'

                  'unb=2701405241;sg=%E4%BC%A01a;_l_g_=Ug%3D%3D;skt=051d1e62c9ab8dc5;'

                  'cookie1=BxMH3NLGjomw6rJWdejLCTcGCFEkYwy%2BoQnBejFVBZI%3D;csg=93fdcb74;'

                  'uc3=vt3=F8dBy3jYcBxWmWeUeDY%3D;id2=UU8IPT1oZbApWA%3D%3D;nk2=AnIsGxSURQ%3D%3D;lg2=Vq8l%2BKCLz3%2F65A%3D%3D;'

                  'existShop=MTU1OTY1MDAxNw%3D%3D;_cc_=U%2BGCWk%2F7og%3D%3D;dnk=acc%5Cu80E1%5Cu4F20;_nk_=acc%5Cu80E1%5Cu4F20;'

                  'cookie17=UU8IPT1oZbApWA%3D%3D;mt=ci=6_1&np=;'

                  'enc=UFEUuOrXKTfwHDdKWP%2F9JacRMgPfTTQxKHxhkY8FExWK0%2FOKPqyryg048bj3QRjP%2Blwvl2%2BmU0N0pIBcrlD2PA%3D%3D;'
                  'hng=CN%7Czh-CN%7CCNY%7C156;'
                  'uc1=cookie15=VT5L2FSpMGV7TQ%3D%3D&cookie14=UoTZ7Yy7WSTGKg%3D%3D;'

                  '_m_h5_tk=3627342e3b09ae88a2c724b51c503d85_1559657695270;'
                  '_m_h5_tk_enc=c88d8e3f4ba68bc083f853e956656b6c;'
                  'x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0%26__ll%3D-1%26_ato%3D0;'
                  'x5sec=7b22726174656d616e616765723b32223a22623034623263306164386231666234336634303330663463646430663064653743504c4f32656346454e364e6b736a586d664c7373774561444449334d4445304d4455794e4445374d513d3d227d;'
                  'whl=-1%260%260%261559652412550;'
                  'l=bB_eivGRvlAmYuqwBOfgiuI8a17TwIdfhsPzw4_G2ICP_bfDwBcPWZTPLZTkC3GVa6GXR3oyuHLBBW8iLyUCh;'
                  'isg=BGNjWHeLO7xJIPCeqwvJFNPV8qfN8Pf9ajjwFJXBBkM21IT2HSu26rnCyuT_9E-S',

        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',

        'Connection': 'keep-alive',
        'Host': 'rate.taobao.com',
    }
    print(f"爬取评论的连接是---{url}")
    html = requests.get(url=url, headers=headers)
    print(html.text)
    json_str = html.text
    # Cleaning step: the JSONP response is wrapped in parentheses -- strip them.
    json_str = json_str.strip().lstrip('(').rstrip(')')
    # Parse the cleaned JSON string into comment items.
    JSONlist = initlist(json_str)
    print("请求淘宝数据状态为")
    # print(JSONlist)
    return JSONlist


def step5(tbid):
    """Scrape Taobao reviews for *tbid* and persist them to the database.

    Ensures a ``TbIndex`` row exists for the item, then crawls review
    pages 1..2 via ``tbmain``, storing comment text in ``TbComment`` and
    photo URLs in ``TbIMG``.  All failures are logged, never raised.

    NOTE(review): messages are logged at ERROR level even for normal
    progress -- presumably to guarantee visibility in the current logging
    config; confirm before downgrading levels.
    """
    server_logger.error("step5-----Start")
    try:
        # Make sure an index row exists for this Taobao item id.
        checkQuestId = models.TbIndex.objects.filter(tbid=tbid).first()
        if not checkQuestId:
            # create() already saves the new row (the original's extra
            # .save() issued a redundant second query).
            models.TbIndex.objects.create(tbid=tbid)
            checkQuestId = models.TbIndex.objects.filter(tbid=tbid).first()

        pages = 3  # range(1, 3) -> crawl review pages 1 and 2
        for page in range(1, pages):
            JSONlist = tbmain(tbid, page)  # fetch one page of reviews

            time.sleep(3)  # throttle between pages to avoid rate limiting
            for item in JSONlist:
                if item["评论内容"]:
                    models.TbComment.objects.update_or_create(tbComment_Index=checkQuestId, commentWord=item["评论内容"])
                if len(item["好评图片"]) >= 1:
                    for img in item["好评图片"]:
                        # URLs may have been stored with a 'http;' prefix
                        # by the scraper -- normalize before persisting.
                        models.TbIMG.objects.update_or_create(tbimg_Index=checkQuestId, imgSrc=img.replace("http;", "http:"))
        server_logger.error("step5-----运行成功")
    except Exception as e:
        server_logger.error(f"step5-----step5出错----{e}")


import logging

import random
import requests
def downloadImg(tbid,url):
    """Download *url* into MEDIA_ROOT/<tbid>/ and return the saved file path.

    Args:
        tbid: Taobao item id; used as the per-item directory name.
        url: image URL to fetch.

    Returns:
        str: absolute path of the written .jpg file.

    NOTE(review): the per-item directory is wiped on every call, so only
    the most recent image survives -- callers insert the file into the
    document immediately after this returns, which is why that works.
    """
    response = requests.get(url)
    # response.content is the raw image bytes.
    img = response.content

    item_dir = os.path.join(settings.MEDIA_ROOT, str(tbid))
    if os.path.exists(item_dir):
        shutil.rmtree(item_dir)  # start from a clean directory
    os.makedirs(item_dir)

    # Timestamp-based filename avoids collisions.  (The original computed
    # time.time() but never used it, saving everything as '(unknown).jpg'.)
    filepath = os.path.join(item_dir, f'{time.time()}.jpg')
    with open(filepath, 'wb') as f:
        f.write(img)
    return filepath

def RandomLinkContent():
    """Placeholder link generator: yields an empty (title, url) pair."""
    title, link = '', ''
    return title, link
def tbpinglun_getNeedData(TBComment_Query,TBimg_Query):
    """Normalize DB rows into the fixed-size data the article template needs.

    Reads comment rows (``.commentWord``) and image rows (``.imgSrc``) and
    returns exactly 50 comment strings and exactly 11 image URLs:

    * enough comments  -> 50 drawn at random;
      too few          -> all of them (shuffled) topped up with canned
                          filler phrases;
    * enough images    -> 11 drawn at random;
      too few          -> all of them padded with empty strings.

    Returns:
        tuple[list[str], list[str]]: (comments, image URLs).
    """
    print(len(TBComment_Query))
    commentList = [row.commentWord for row in TBComment_Query]
    print(len(TBimg_Query))
    commentImgList = [row.imgSrc for row in TBimg_Query]
    logging.warning('正常运行')

    # ---- comments: normalize to exactly 50 entries ----
    if len(commentList) >= 50:
        newcommentList = random.sample(list(commentList), 50)
        print('=======tbpinglun_getNeedData---commentList=========')
        print(commentList)
    else:
        # Canned filler phrases used to pad out a short comment pool.
        kouyuList = ["心情美美的东西不错",'是个好东西,用完了再来回购','是正品第二次购买了，好不好自己用了才知道','我用了很久了很好用，良心推荐','包装完好 很满意 好评质量杠杠得','认真点评一下，真的很好,可以放心用','朋友推荐的品牌，售后给力','包装严实精美，效果不错，质量有保障是关键','哈哈。真心不错哦','好用,发的快递很快，一天就到了。物流必须点个赞！']
        # Sampling len(commentList) items is just a shuffle of what we have.
        newcommentList = random.sample(list(commentList), len(commentList))
        filler = [random.choice(kouyuList) for _ in range(50 - len(commentList))]
        newcommentList = newcommentList + filler

    # ---- images: normalize to exactly 11 entries ----
    if len(commentImgList) >= 11:
        newcommentImgList = random.sample(list(commentImgList), 11)
    else:
        # Shuffle what we have, then pad with empty strings (the template
        # simply leaves blanks for them).
        newcommentImgList = random.sample(list(commentImgList), len(commentImgList))
        newcommentImgList = newcommentImgList + [""] * (11 - len(commentImgList))

    print(newcommentList)
    print(newcommentImgList)
    return newcommentList, newcommentImgList

"""    
需要的数据为：
    commentWordList 评论数据           TbComment -> commentWord
    item_TBK_url    商品淘宝客连接      TbIndex -> tbkURL
    shop_TBK_LINK   店铺淘宝客连接      TbIndex -> tbkShopURL
    imgSrcList      图片列表           TbIMG -> imgSrc
    keyword         核心词             BBSContent  -> keyword
    title           取得是             BBSContent  -> contentWord
"""

from docx import Document
from docx.oxml.ns import qn
from docx.shared import Inches, Pt, RGBColor
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
def createWord(tbid):
    """Build a demo .docx: one paragraph plus the first image found in
    MEDIA_ROOT/<tbid>/, saved to MEDIA_ROOT/docx/1234.docx."""
    doc = Document()
    item_dir = os.path.join(settings.MEDIA_ROOT, str(tbid))
    # Every file in the per-item directory, e.g. ['1590389644.933063.jpg'].
    images = os.listdir(item_dir)
    print(images)
    doc.add_paragraph('A plain paragraph having some ')
    doc.add_picture(os.path.join(item_dir, images[0]))
    doc.save(os.path.join(settings.MEDIA_ROOT, 'docx', '1234.docx'))





def ContentFactory(TBimg_Query,TBComment_Query,tbid):
    """Assemble a Word document from scraped review comments and images.

    Pulls 50 comment strings and 11 image URLs via
    ``tbpinglun_getNeedData``, groups the comments into 7 paragraph texts,
    and for each paragraph downloads one image and appends paragraph +
    picture to the document, which is saved to MEDIA_ROOT/docx/1234.docx.

    Args:
        TBimg_Query: queryset/iterable of rows exposing ``.imgSrc``.
        TBComment_Query: queryset/iterable of rows exposing ``.commentWord``.
        tbid: Taobao item id (names the image download directory).

    Returns:
        A truthy marker string on success, or None on any failure
        (e.g. an empty padded image URL) -- errors are logged, not raised.
    """
    document = Document()
    try:
        # 50 comment strings, 11 image URLs (padded as needed).
        commentList, pic = tbpinglun_getNeedData(TBComment_Query, TBimg_Query)

        # Slice the comment pool into the 7 paragraph texts the template
        # uses.  (The original also built segments 7-12 but never used
        # them -- dropped as dead computation.)
        word = [
            ''.join(commentList[0:1]),         # paragraph 0
            ''.join(commentList[2:4]),         # paragraph 1
            ''.join(commentList[5:8]),         # paragraph 2
            commentList[9] + commentList[10],  # paragraph 3
            ''.join(commentList[11:13]),       # paragraph 4
            ''.join(commentList[14:17]),       # paragraph 5
            ''.join(commentList[18:20]),       # paragraph 6
        ]

        wxword = '微信广告语'  # WeChat ad line -- placeholder, not yet inserted
        randomtitle1, randomUrl = RandomLinkContent()
        randomtitle2, randomUr2 = RandomLinkContent()

        # One paragraph + one freshly-downloaded picture per segment
        # (replaces seven copy-pasted stanzas in the original).
        for text, img_url in zip(word, pic):
            document.add_paragraph(f'{text}')
            picSrc = downloadImg(tbid, img_url)
            # downloadImg returns an absolute path; os.path.join with an
            # absolute second argument resolves to that path unchanged.
            document.add_picture(os.path.join(settings.MEDIA_ROOT, str(tbid), picSrc))
        document.save(os.path.join(settings.MEDIA_ROOT, 'docx', '1234.docx'))

    except Exception as e:
        print(e)
        logging.info('组合评论成为文章步骤出错,问题在postMAIN.py tbpinglun函数里边,采集发帖失败')
        return None
    return 'fdasfda'
if __name__ == '__main__':
    tbid = 2434324
    # Scrape Taobao reviews for this item id into the database.
    step5(tbid)
    TBimg_Query = models.TbIMG.objects.filter(tbimg_Index__tbid=tbid).all()
    TBComment_Query = models.TbComment.objects.filter(tbComment_Index__tbid=tbid).all()
    # Fix: ContentFactory requires tbid as its third argument (the
    # original two-argument call raised TypeError).
    Content = ContentFactory(TBimg_Query, TBComment_Query, tbid)
