# -*- coding: utf-8 -*-
'''
@Time : 2021/10/05 13:20
@Author : Sa布
@File : 图志details.py
@Software : PyCharm
'''


from bs4 import BeautifulSoup  # 网页解析，获取数据
import urllib.request, urllib.error  # 制定URL，获取网页数据
import re
from lxml import etree
import ssl
import time
import json
import sys
import getopt

# Disable HTTPS certificate verification globally so pages on hosts with
# invalid/self-signed certificates can still be fetched.
ssl._create_default_https_context = ssl._create_unverified_context

# Pre-compiled link-extraction patterns.
# NOTE(review): neither pattern is referenced anywhere in this file —
# presumably leftovers from an earlier version; confirm before removing.
findlink1 = re.compile(r'<a class="mems" href="(.*?)" target="_blank">')
findlink2 = re.compile(r'<a href="(.*?)">')

def main(argv):
    """Parse command-line options and scrape the requested page.

    Supported options:
        -u URL / --url=URL : address of the article page, forwarded to Details().

    Exits silently when the arguments cannot be parsed.
    """
    try:
        # "u:" / ["url="]: the trailing ':' / '=' mark options that take a value.
        options, args = getopt.getopt(argv, "u:", ["url="])
    except getopt.GetoptError:
        sys.exit()
    for option, value in options:
        if option in ("-u", "--url"):
            # `format(value)` in the original was a no-op str() conversion;
            # getopt already yields strings, so pass the value straight through.
            Details(value)


def Details(URL):
    """Scrape title, body text and image URLs from a 56-china.com.cn article
    page and print the result as one JSON document.

    URL: absolute address of the article page.

    Printed schema (nothing is returned):
        {"title": <last title string, or {} when no title was found>,
         "text":  {"text0": ..., "text1": ...},
         "img":   {"img0": ..., "img1": ...},
         "time":  "YYYY-MM-DD HH:MM:SS"}
    """
    # Fetch once — the original downloaded the identical page twice.
    html = askURL(URL)
    soup = BeautifulSoup(html, "html.parser")

    titles = []  # cleaned title strings
    texts = []   # paragraph fragments longer than 10 characters
    images = []  # absolute image URLs

    # One iteration per terminal <div>.  The XPath queries below run against
    # the whole page, so with several such divs the data would repeat —
    # article pages are expected to contain exactly one.
    for _ in soup.find_all('div', class_="terminal"):
        selector = etree.HTML(html)

        # --- titles -------------------------------------------------------
        for each in selector.xpath("//div[@class='terminal_tit_1']/text()"):
            titles.append(
                each.replace('\n', '').replace('\r', '').replace('\t', ''))

        # --- body text: plain paragraphs first, then bold ones ------------
        text1 = selector.xpath(
            "//div[@class='terminal_con']/p[position()<50]/span/text()")
        text2 = selector.xpath(
            "//div[@class='terminal_con']/p/strong/span/text()")
        for each in (re.sub(r"\s", "", t) for t in text1 + text2):
            # Very short fragments are navigation/formatting noise; the
            # original also skipped them, but still counted short bold
            # fragments, leaking literal 0 placeholders into the JSON.
            if len(each) > 10:
                texts.append(each)

        # --- images: direct children and ones nested in <span> ------------
        img1 = selector.xpath("//div[@class='terminal_con']/p/img/@src")
        img2 = selector.xpath("//div[@class='terminal_con']/p/span/img/@src")
        for each in img1 + img2:
            # Turn relative "../xxx" paths into absolute site URLs.
            images.append(each.replace("..", "https://www.56-china.com.cn/"))

    result = builtjson(len(titles), len(texts), len(images))
    result["time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # NOTE(review): only the last title survives — preserved from the original
    # schema, where "title" ends up a plain string (or stays an empty dict
    # when the page has no title).
    for title in titles:
        result["title"] = title
    for i, text in enumerate(texts):
        result["text"]["text" + str(i)] = text
    for i, img in enumerate(images):
        result["img"]["img" + str(i)] = img
    print(json.dumps(result, ensure_ascii=False))


def builtjson(titlec, textc, imgc):
    """Build the skeleton of the result document.

    Returns a dict with an empty "title" mapping plus "text"/"img" mappings
    pre-seeded with "textN"/"imgN" keys holding empty strings, which the
    caller then fills in.

    titlec is accepted for symmetry but is currently unused: the "title"
    mapping is deliberately left empty.
    """
    skeleton = {
        "title": {},
        "text": {"text" + str(i): "" for i in range(textc)},
        "img": {"img" + str(i): "" for i in range(imgc)},
    }
    return skeleton

def askURL(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    A browser User-Agent header is sent so the server treats the request
    as coming from an ordinary browser.  On any URLError the HTTP status
    code and/or reason is printed and "" is returned — callers get an
    empty page rather than an exception (best-effort behavior preserved).
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # `with` closes the connection — the original leaked the response.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html



if __name__ == "__main__":
    # Entry point: forward command-line arguments (minus the script name).
    main(sys.argv[1:])
    # b=webdriver.Chrome()
