#!/usr/bin/python3
# Filename: support.py
import logging
import urllib.request
import xlwt
import requests
from lxml import etree
import time
import re
import mysql.connector
import os
import sys

import seimUtil

sys.path.append("..\\..\\..\\utils")
import ccwfile

# Endpoint of the local collection service that stores downloaded wallpapers.
upload_url = "http://localhost:18085/chenchen-fast/reptile/reptilenetbianimage/upload"
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(name)s %(levelname)s %(message)s",
                    datefmt='%Y-%m-%d  %H:%M:%S %a')  # mind the month/day order; these format codes are the same as the time module's

# Root of the wallpaper site being crawled.
url_base = "http://www.netbian.com"

# Local download root. NOTE(review): relies on "\d" and "\i" not being
# recognized escapes — fragile; a raw string r"E:\data\image\\" would be safer.
base_dir = "E:\data\image\\"
# Crawl state shared across functions (mutated as the crawl proceeds).
current_type = "default"
current_page = 1

# Seed cookie prefix used when the site's 503 anti-bot challenge forces a refresh.
base_cookie = '__yjs_duid=1_81be5962b29ac4994078b6141f2256dd1626244566834; yjs_js_security_passport=852398bf3fb5bf84a06a9a655cfcbee666c05f88_1626244569_js; '
# Current session cookie sent with every request (replaced after a 503 response).
cookie = '__yjs_duid=1_81be5962b29ac4994078b6141f2256dd1626244566834; Hm_lvt_14b14198b6e26157b7eba06b390ab763=1626244570; xygkqecookieclassrecord=%2C1%2C28%2C3%2C19%2C; xygkqecookieinforecord=%2C1-1%2C1-3%2C19-19670%2C; yjs_js_security_passport=52f7c8f82002761fa5515f7d8ea120dd53ebe249_1626250905_js; Hm_lpvt_14b14198b6e26157b7eba06b390ab763=1626251104'


def getCookie(c_name, cookie_str=None):
    """Return the value of cookie `c_name` from a cookie header string.

    c_name     -- cookie name to look up.
    cookie_str -- cookie string ("k=v; k2=v2; ...") to search; defaults to
                  the module-level `cookie` when omitted (backward-compatible).

    Returns the cookie's value, or None when the name is not present.
    (The original body was untranslated JavaScript -- `document.cookie`,
    `indexOf`, `substring`, `.length` -- and could not run in Python.)
    """
    if cookie_str is None:
        cookie_str = cookie
    marker = c_name + "="
    start = cookie_str.find(marker)
    if start == -1:
        return None
    start += len(marker)
    # Value runs until the next ';' or to the end of the string.
    end = cookie_str.find(";", start)
    if end == -1:
        end = len(cookie_str)
    return cookie_str[start:end]


def handle_header(param):
    """Build browser-like request headers for www.netbian.com pages.

    param -- the value to send as the Cookie header.
    """
    # Values mimic a desktop Chrome request so the site serves normal pages.
    return dict([
        ("Host", "www.netbian.com"),
        ("Connection", "keep-alive"),
        ("Cache-Control", "max-age=0"),
        ("Upgrade-Insecure-Requests", "1"),
        ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"),
        ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"),
        ("Accept-Encoding", "gzip, deflate"),
        ("Accept-Language", "zh-CN,zh;q=0.9"),
        ("Cookie", param),
        ("If-None-Match", "60d9e1cf-a8928"),
        ("If-Modified-Since", "Mon, 28 Jun 2021 14:50:55 GMT"),
    ])


def handle_img_header(param):
    """Build browser-like request headers for the img.netbian.com image host.

    param -- the value to send as the Cookie header.
    """
    # Same Chrome-like fingerprint as handle_header, but targeting the image host.
    return dict([
        ("Host", "img.netbian.com"),
        ("Connection", "keep-alive"),
        ("Cache-Control", "max-age=0"),
        ("Upgrade-Insecure-Requests", "1"),
        ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"),
        ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"),
        ("Accept-Encoding", "gzip, deflate"),
        ("Accept-Language", "zh-CN,zh;q=0.9"),
        ("Cookie", param),
        ("If-None-Match", "60d9e1cf-a8928"),
        ("If-Modified-Since", "Mon, 28 Jun 2021 14:50:55 GMT"),
    ])


def start_download():
    """Crawl the site root, discover every wallpaper category, and download each.

    Side effects: mutates the module-global `current_type` and creates one
    directory per category under `base_dir`.
    """
    global current_type
    home_html = get_htm_msg(url_base)
    # Narrow down to the category navigation bar first.
    nav_blocks = re.findall('<div class="nav cate">(.*?)</div>', home_html, re.S)
    logging.info("定类型位信息块长度为: %s ", len(nav_blocks))
    if not nav_blocks:
        return
    category_links = re.findall('<a href="/(.*?)".*?>(.*?)</a>', nav_blocks[0], re.S)
    logging.info("类型数量为: %s ", len(category_links))
    for path, label in category_links:
        logging.info("类型地址为: %s == 类型名称为 %s ", path, label)
        category_url = '/' + path
        # Keep only CJK characters and ASCII alphanumerics for a safe folder name.
        current_type = re.sub(u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])", "",
                              category_url)
        if not os.path.exists(base_dir + current_type):
            os.makedirs(base_dir + current_type)
        start_by_type(category_url)


def start_by_type(type):
    """Crawl one category's listing pages and handle at most the first 3 pages.

    type -- category path beginning with '/', appended to `url_base`.

    Side effects: resets/advances the module-global `current_page`.
    """
    global current_page
    search_url = url_base + type
    html_msg = get_htm_msg(search_url)

    current_page = 1

    # "xx" acts as a skip marker for a category; never set elsewhere in this file.
    if current_type != "xx":
        handle_page(html_msg)

    while True:
        # Hoisted: the original recomputed get_next_page_url (a regex scan of the
        # whole page) three times per iteration.
        next_url = get_next_page_url(html_msg)
        if next_url is None or next_url == "":
            break
        current_page = current_page + 1
        logging.info("下一页地址为 : " + next_url.__str__())
        if current_page > 3:
            # Stop after 3 pages. The original used `continue`, which kept
            # fetching every remaining listing page without ever handling it.
            break
        html_msg = get_htm_msg(next_url)
        handle_page(html_msg)


def get_next_page_url(html):
    """Return the absolute URL of the '下一页' (next page) link in `html`.

    html -- a listing-page HTML string.

    Returns '' when there is no pager, no next-page anchor, or the anchor has
    no href. (The original fell through to None on two of those paths; ''
    is now returned uniformly -- callers already accepted both.)
    """
    pager_blocks = re.findall('<div class="page">(.*?)</div>', html, re.S)
    if not pager_blocks:
        return ""
    anchors = re.findall('<a (.*?)</a>', pager_blocks[0], re.S)
    for anchor in anchors:
        if '下一页' not in anchor:
            continue
        hrefs = re.findall('href="(.*?)"', anchor, re.S)
        if hrefs:
            return url_base + hrefs[0]
        logging.info("暂时无下一页")
        return ""
    return ""


def get_htm_msg(url):
    """GET `url` and return its HTML decoded as gbk; '' on any failure.

    When the site answers 503 (its JS-based anti-bot challenge), the
    module-global `cookie` is rebuilt via seimUtil and the request retried once.
    """
    global cookie

    logging.info("开始解析 : " + url.__str__())
    try:
        # Timeout added: without one a hung connection blocks the crawl forever.
        res = requests.get(url=url, headers=handle_header(cookie), timeout=30)
        if res.status_code == 503:
            cookie = base_cookie + seimUtil.get_cookie(url)
            res = requests.get(url=url, headers=handle_header(cookie), timeout=30)

        res.encoding = 'gbk'  # the site serves gbk-encoded pages
        return res.text
    except Exception:
        # Best-effort: callers treat '' as "page unavailable". Was a bare
        # `except:` that silently swallowed everything incl. KeyboardInterrupt.
        logging.exception("解析失败 : " + url.__str__())
        return ""


def handle_page(page_msg):
    """Extract every image-detail link on one listing page and process it."""
    logging.info("开始解析类型为 %s 的第 %s 页", str(current_type), str(current_page))
    # First isolate the <div class="list"> section that holds the thumbnails.
    sections = re.findall('<div class="list">(.*?)</div>\r\n\t<div class="', page_msg, re.S)
    if not sections:
        return
    link_pattern = '<li><a href="(.*?)" title=".*?" target="_blank"><img src=".*?</a></li>'
    links = re.findall(link_pattern, sections[0], re.S)
    logging.info("当前页面图片数量为 : " + str(len(links)))
    for link in links:
        handle_image(url_base + link)


def handle_image(image_url):
    """Fetch an image's detail page and download the full-size picture on it."""
    detail_html = get_htm_msg(image_url)
    pic_pattern = '<div class="pic"><p><a href=".*?<img src="(.*?)" alt="(.*?)".*?</a></p>'
    matches = re.findall(pic_pattern, detail_html, re.S)
    if matches:
        # matches[0] is a (src, alt) tuple.
        download_file(matches[0])


def download_file(img_url):
    """Download one wallpaper to disk and upload it to the collection service.

    img_url -- (src, alt) tuple: [0] absolute image URL, [1] display name.

    Side effects: may refresh the module-global `cookie`, creates the
    per-page directory under `base_dir`, writes the file, calls the upload API.
    """
    global cookie
    # Guard first: the original logged and split the URL before checking it.
    if not img_url[0]:
        return
    logging.info("开始解析 <<" + img_url[1] + ">> === 地址为 : " + img_url[0])
    file_name = img_url[0].split("/")[-1]
    # urllib.request.urlretrieve stalled frequently; requests is used instead.
    r = requests.get(img_url[0], headers=handle_img_header(cookie))
    if r.status_code == 503:
        # Anti-bot challenge: rebuild the cookie and retry once.
        cookie = base_cookie + seimUtil.get_cookie(img_url[0])
        # BUG FIX: the retry previously referenced an undefined name `url`
        # and raised NameError on every 503 during an image fetch.
        r = requests.get(url=img_url[0], headers=handle_img_header(cookie))

    # Write the image under <base_dir>/<type>/第<page>页/.
    page_dir = base_dir + current_type + "\\第" + current_page.__str__() + "页\\"
    if not os.path.exists(page_dir):
        os.makedirs(page_dir)
    target_path = page_dir + file_name
    with open(target_path, 'wb') as f:
        f.write(r.content)
    ccwfile.upload_file(upload_url + '?fileName=' + img_url[1] + "&imageType=" + current_type, target_path)

# if __name__ == '__main__':
#     sxxx='http://img.netbian.com/file/2021/0621/72ea91dc65556caf49e9f7743d846c63.jpg'
#     urllib.request.urlretrieve(sxxx, 'dad.jpg')
