import hashlib
import json
import random
import re
import sys
import time

import logging
from pymongo.errors import AutoReconnect
from retry import retry
from lxml import etree,html
import os
import cchardet
import requests
from datetime import datetime
from parsel import Selector
from selenium import webdriver
from selenium.webdriver.chrome.options import Options



BASH_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASH_DIR)
from tender_project.conf.Pic_Use import get_image1, small_pic, upload_image, del_pic
#

from loguru import logger


def remove_js_css(etree_obj):
    """
    Detach all script/noscript and style/nostyle elements from a parsed
    lxml tree, in place.

    :param etree_obj: lxml element tree to clean
    :return: the same tree object, with the offending nodes removed
    """
    for selector in ("//script | //noscript", "//style | //nostyle"):
        for node in etree_obj.xpath(selector):
            node.getparent().remove(node)
    return etree_obj

def remove_js_style(response):
    """
    Strip <script>/<noscript>/<style>/<nostyle> tags from a requests
    response and return the cleaned HTML text.

    :param response: requests.Response object (callers pass the raw
                     response, see send_request)
    :return: cleaned HTML string, or False when parsing/serialization fails
    """
    try:
        encoding = response.apparent_encoding
        # Bug fix: the original passed the Response object itself to
        # etree.HTML(), which raises and made this function always return
        # False through the blanket except; parse the body text instead.
        tree = etree.HTML(response.text)
        for node in tree.xpath("//script | //noscript"):
            node.getparent().remove(node)
        for node in tree.xpath("//style | //nostyle"):
            node.getparent().remove(node)
        return html.tostring(tree, encoding=encoding).decode(encoding)
    except Exception:
        return False

def clear_html(html_str):
    """
    Remove script/style related tags from an HTML string using parsel.

    :param html_str: raw HTML text
    :return: the cleaned HTML text
    """
    choice = Selector(html_str)
    # The original removed //script twice; one pass is sufficient.
    choice.xpath('//script').remove()
    choice.xpath('//style').remove()
    choice.xpath('//noscript').remove()
    choice.xpath('//nostyle').remove()
    return choice.get()

def send_request(method: str, url: str, attr: str = None, data=None, headers: dict = None, cookies: dict = None,
                 weather_remove: bool = True, weather_wait: bool = True, time_out: int = 30) -> str or bool:
    """
    Send an HTTP request, retrying with growing back-off on failure.

    :param headers: extra request headers; a default User-Agent is supplied
    :param method: "GET" or "POST" (any non-GET method is sent with a body)
    :param attr: for POST, "json" sends `data` as a JSON body, anything else
                 sends it as form data
    :param data: POST payload
    :param cookies: cookies passed through to requests
    :param weather_remove: strip script/style tags from GET responses
    :param weather_wait: sleep 1-3 s after a successful request (politeness)
    :param time_out: per-request timeout in seconds
    :return: decoded page text on success; False when the url is abandoned
             or the server answers "reload"; the HTTP status code otherwise
    """
    if headers is None:
        headers = {}
    count = 1
    while True:
        headers_ = {
            # "User-Agent": str(UserAgent().random)
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'
        }
        headers_.update(headers)
        session = requests.session()
        session.keep_alive = False
        proxy = None
        sleep_time = random.randint(1, 3)
        try:
            # NOTE: verify=False disables TLS certificate validation for
            # every request made here.
            if method == "GET":
                response = session.request(method=method, url=url, headers=headers_, proxies=proxy, timeout=time_out,
                                           cookies=cookies, verify=False)
                response.encoding = response.apparent_encoding  # use the sniffed encoding for .text
                if weather_wait:
                    time.sleep(sleep_time)
            else:
                if attr == "json":
                    response = session.request(method=method, url=url, headers=headers_, proxies=proxy, json=data,
                                               timeout=time_out, cookies=cookies, verify=False)
                else:
                    response = session.request(method=method, url=url, headers=headers_, proxies=proxy, data=data,
                                               timeout=time_out, cookies=cookies, verify=False)
                if weather_wait:
                    time.sleep(sleep_time)
        except Exception:
            count = count + 1
            logging.error("请求 %s 错误" % url)
            time.sleep(3)
            # Bug fix: check the give-up threshold first. The original tested
            # `count > 3` before `elif count > 5`, so the abandon branch was
            # unreachable and a dead url was retried forever.
            if count > 5:
                logging.error("请求 %s 错误5次，本次不再访问该url,进行下一个链接访问" % url)
                return False
            if count > 3:
                logging.error("请求 %s 错误3次，等待120秒后再次访问该url" % url)
                time.sleep(120)
            continue
        else:
            if response.status_code == 200:
                if response.text == "reload":
                    return False
                if method == "GET" and weather_remove:
                    return remove_js_style(response)
                # Decode with the encoding cchardet sniffs from the raw bytes.
                return response.content.decode(encoding=cchardet.detect(response.content)['encoding'])
            logging.error("访问%s失败,失败码%s" % (url, response.status_code))
            return response.status_code


def originalurl_data_from(originalurl_data_from):
    """
    Build the canonical dict used to fingerprint a tender notice.

    Expected input shape::

        item["originalurl_data_from"] = {
            "url": ..., "method": ...,
            "request_only_data": ..., "response_only_data": ...
        }

    When a "data" key is present it wins as the request payload and the
    request/response_only_data keys are ignored (original behavior kept).

    :param originalurl_data_from: crawled item dict
    :return: normalized dict, or an error-message string when the field
             is missing
    """
    source = originalurl_data_from.get("originalurl_data_from")
    if not source:
        return "缺少originalurl_data_from字段"
    source = dict(source)
    mod_json = {"url": source["url"], "method": source["method"]}
    if "data" in source:
        if source["data"]:
            mod_json["request_only_data"] = source["data"]
    else:
        if source.get("request_only_data"):
            mod_json["request_only_data"] = dict(source["request_only_data"])
        if source.get("response_only_data"):
            mod_json["response_only_data"] = dict(source["response_only_data"])
    return mod_json

def get_unid(*args):
    """
    Build a 32-char lowercase hex MD5 fingerprint from the arguments.

    If the first argument is a dict (or dict subclass) it is serialized
    with json.dumps and hashed on its own; otherwise all arguments are
    stringified and concatenated before hashing.

    :param args: one dict, or any number of stringifiable values
    :return: md5 hex digest (hexdigest() is already lowercase and never
             contains '-', so no post-processing is needed)
    """
    # isinstance instead of type(...) == dict so dict subclasses are
    # serialized properly rather than falling into str() concatenation.
    if isinstance(args[0], dict):
        payload = json.dumps(args[0])
    else:
        payload = "".join(str(part) for part in args)
    return hashlib.md5(payload.encode(encoding="utf-8")).hexdigest()

def born_tender(item):
    """
    Return the tender's unique id, derived from its
    originalurl_data_from payload.

    :param item: crawled item containing "originalurl_data_from"
    :return: md5 hex id string
    """
    return get_unid(originalurl_data_from(item))


def get_proxies(file_path="/root/tender_project/gonggongjiaoyiziyuan_spider/spider_util/proxie.txt"):
    """
    Read the first proxy entry from a JSON proxy file and return "ip:port".

    :param file_path: path of the proxy JSON file; expected shape is
                      {"data": [{"ip": ..., "port": ...}, ...]}.
                      Defaults to the deployment path the original
                      hard-coded, so existing callers are unaffected.
    :return: "ip:port" string built from the first entry
    """
    with open(file_path, "r") as f:
        payload = json.load(f)
    first = payload["data"][0]
    return "{}:{}".format(first["ip"], first["port"])

# Screenshot upload: persist the snapshot result on the Mongo document.
def update_image2(id, coll_name, path, xpath_err):
    """
    Record a snapshot result on the tender document.

    Sets SnapShot to `path`, marks image_status=2 and stores the caller's
    xpath_err flag.

    :param id: document _id
    :param coll_name: Mongo collection object
    :param path: uploaded snapshot path ("" when the snapshot failed)
    :param xpath_err: 0/1 flag to persist. Bug fix: this was previously
                      hard-coded to 0, silently discarding the xpath_err=1
                      that upload_image_delete_pic2 passes on failure.
    :return: True on success, False when the update raised
    """
    try:
        coll_name.update_one(
            {"_id": id},
            {"$set": {
                "SnapShot": path,
                "image_status": 2,
                "xpath_err": xpath_err,
            }})
    except Exception as e:
        print('update_image2 %s' % e)
        return False
    else:
        return True

def upload_image_delete_pic2(url, coll_name, id, text_xpath,send_request_selenium):
    '''
    Take a snapshot of `url`, upload it, record the result in Mongo and
    delete the local image files once the upload succeeds.

    :param url: detail-page url to snapshot
    :param coll_name: Mongo collection holding the tender document
    :param id: document _id to update (also used as the image file name)
    :param text_xpath: xpath of the page body, forwarded to get_image1
    :param send_request_selenium: selenium-based fetch helper forwarded to get_image1
    :return: uploaded image path on success, False on failure
    '''
    # Local working directories: pic_test holds the full-size shot,
    # pic holds the thumbnail produced by small_pic.
    pic_test_image_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "pic_test")
    pic_image_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "pic")
    if not os.path.exists(pic_test_image_path):
        os.makedirs(pic_test_image_path)
    if not os.path.exists(pic_image_path):
        os.makedirs(pic_image_path)
    pic_path = os.path.join(pic_test_image_path, "{}.png".format(id))
    small_pic_path = os.path.join(pic_image_path, "{}.png".format(id))
    html = get_image1(url, pic_path, text_xpath,send_request_selenium)
    print('路径是')
    print(pic_path)
    if not html:
        # Snapshot failed: flag the document with xpath_err=1 and give up.
        # print('upload_image_delete_pic2 :::: %s' %e)
        update_image2(id, coll_name, '', xpath_err=1)
        return False
    small_pic(pic_path, small_pic_path)
    # TODO: verify that the captured image is valid before uploading
    path, image_status = upload_image(pic_path, url)
    # A truthy image_status means the same screenshot was produced 3+ times
    # in a row — treated as a fatal condition for the whole process.
    if image_status:
        print('相同截图超过三次以上，停止程序')
        # NOTE(review): a bare `raise` with no active exception raises
        # RuntimeError — presumably intended as a hard stop; confirm.
        raise
        # exit()
    if path:
        # del_pic(path)
        result = update_image2(id, coll_name, path, xpath_err=0)
        if not result:
            print("图片上传失败！")
            del_pic(path)
        else:
            print(url)
            print("https://oss.jianshequan.com/oss/" + path)
            print("图片上传成功！")
        # Clean up both local files regardless of the Mongo update outcome.
        os.remove(pic_path)
        os.remove(small_pic_path)
        return path
    else:

        print("图片上传失败！")
        pass
        # os.remove(pic_path)
        # os.remove(small_pic_path)
        # coll_name.update_one({"_id": id}, {"$set": {
        #     "image_status": 3
        # }})
        return False

def sha256_all_text( all_text):
    """
    Return the sha256 hex digest of the given text
    (encoded with the platform default, as before).
    """
    return hashlib.sha256(all_text.encode()).hexdigest()


def get_all_text(res, xpath):
    """
    Hash the Chinese text content of the page's main body.

    Extracts the text selected by `xpath`, keeps only CJK characters, and
    returns their sha256 when more than 50 were found.

    :param res: HTML text
    :param xpath: xpath selecting the body's text nodes
    :return: sha256 hex id, or "" when too little Chinese text was found
    """
    # Fix: the docstring used to sit AFTER the first statement, making it a
    # dead string literal instead of documentation. Regex is now raw.
    re_rule = re.compile(r"[\u4e00-\u9fa5]")
    res = etree.HTML(res)
    res_text = "".join(res.xpath(xpath))
    all_text = "".join(re_rule.findall(res_text))
    if all_text and len(all_text) > 50:
        return sha256_all_text(all_text)
    return ""


def str_time_to_datetime(str_time):
    """
    Normalize a publish-time value to a datetime.

    Strings of length <= 10 are parsed as '%Y-%m-%d', longer strings as
    '%Y-%m-%d %H:%M:%S'; non-string values are returned unchanged
    (assumed to already be datetime — TODO confirm with callers).

    :param str_time: date string or datetime
    :return: datetime on success; False on parse failure (the raw value is
             appended to publishtime.log for later inspection)
    """
    try:
        # isinstance instead of type(...) == str, so str subclasses parse too.
        if isinstance(str_time, str):
            fmt = '%Y-%m-%d' if len(str_time) <= 10 else '%Y-%m-%d %H:%M:%S'
            return datetime.strptime(str_time, fmt)
        return str_time
    except Exception as err:
        logger.error(f'发布时间格式转换err info::{err},请在方法中添加相应字符串格式')
        with open(file='publishtime.log', mode='a+', encoding='utf-8') as f:
            f.write(str_time)
        return False

@retry(AutoReconnect, tries=4, delay=1)
def update_params2(id, page, status, db_name):
    """
    Persist crawl progress (page number and status) on a params document,
    retrying up to 4 times on Mongo AutoReconnect.

    :param id: document _id
    :param page: last processed page number
    :param status: crawl status flag
    :param db_name: Mongo collection object
    """
    db_name.update_one({"_id": id}, {"$set": {"page": page, "status": status}})


def title_strip(title):
    """
    Remove every whitespace character from a title string.

    :param title: raw title text
    :return: title with all whitespace dropped
    """
    return "".join(ch for ch in title if not ch.isspace())

def get_file_json(text_xpath, html_):
    """
    Collect attachment links (pdf/zip/doc/rar/xlsx) from the detail body.

    Candidate links come from <a href> and <button code> elements under
    text_xpath; an entry is kept when either its url or its link text
    mentions a known attachment extension (substring match, so ".doc"
    also matches ".docx", as before).

    :param text_xpath: xpath of the detail body
    :param html_: HTML text
    :return: {"files": [{"file_name", "file_url", "file_type"}, ...]}
             or "" when nothing matched
    """
    # Fix: raw-string regex; the old condition also tested ".zip" three times.
    suffix_rule = re.compile(r"\.([^.]*)$")
    known_exts = (".pdf", ".zip", ".doc", ".rar", ".xlsx")
    res = etree.HTML(html_)
    file_url_list = res.xpath(text_xpath + "//a/@href") + res.xpath(text_xpath + "//button/@code")
    file_name_list = res.xpath(text_xpath + "//a//text()") + res.xpath(text_xpath + "//button//text()")
    file_url_real_list = []
    file_name_real_list = []
    for file_url, file_name in zip(file_url_list, file_name_list):
        if any(ext in file_url for ext in known_exts) or any(ext in file_name for ext in known_exts):
            file_url_real_list.append(file_url)
            file_name_real_list.append(file_name)
    file_type = ["".join(suffix_rule.findall(u)) for u in file_url_real_list]
    file_json = {"files": []}
    for n, u, t in zip(file_name_real_list, file_url_real_list, file_type):
        if n and t and u:
            file_json["files"].append({"file_name": n, "file_url": u, "file_type": t})
    if not file_json["files"]:
        file_json = ""
    return file_json


# Pre-compiled character classes shared by the is_have_* helpers below.
# Note: each pattern uses `*`, so findall() also yields empty strings at
# every position; the helpers join the matches and test truthiness, which
# discards those empty hits.
letter_rule = re.compile("[A-Za-z]*")
number_rule = re.compile("[0-9]*")
# ASCII plus full-width CJK punctuation treated as "special" characters.
characters_rule = re.compile("[`~!@#$^&*()=|{}':;,.<>《》/?！￥…（）‘；：”“。，、？·]*")


def is_chinese(string):
    """
    Report whether the string contains any NON-Chinese character.

    Despite the name, this returns True as soon as a character outside the
    CJK range \\u4e00-\\u9fff is found, and False when every character is
    Chinese (or the string is empty). Callers such as get_all_nbumber rely
    on this inverted behavior, so it is deliberately preserved.

    :param string: text to scan
    :return: bool
    """
    for ch in string:
        if not (u'\u4e00' <= ch <= u'\u9fff'):
            return True
    return False


def is_have_number(string):
    """
    Return True when the string contains at least one ASCII digit.
    (number_rule's `*` quantifier yields empty matches too; joining the
    findall result keeps only real digits.)
    """
    return bool("".join(number_rule.findall(string)))


def is_have_letter(string):
    """
    Return True when the string contains at least one ASCII letter.
    """
    return bool("".join(letter_rule.findall(string)))


def is_have_characters(string):
    """
    Return False when the string contains any special/punctuation character
    matched by characters_rule, True otherwise (note the inverted sense:
    True means "clean").
    """
    return not "".join(characters_rule.findall(string))

def get_all_nbumber(response, text_xpath):
    """
    Collect candidate project-number strings from the page body.

    A stripped text node qualifies when it is not a pure number, mixes
    letters and digits, has at least one non-Chinese character, and has no
    special punctuation. (The historical "nbumber" typo in the name is
    kept for existing callers.)

    :param response: HTML text
    :param text_xpath: xpath of the page body
    :return: list of candidate strings
    """
    tree = etree.HTML(response)
    candidates = []
    for raw in tree.xpath(text_xpath + "//*/text()"):
        text = raw.strip()
        if not text or text.isdigit():
            continue
        if (is_have_letter(text) and is_chinese(text)
                and is_have_characters(text) and is_have_number(text)):
            candidates.append(text)
    return candidates


def get_projectno(number_list, length):
    """
    Derive a project number as the common character prefix of the
    candidate strings, truncated to `length`.

    :param number_list: candidate strings
    :param length: maximum prefix length to keep
    :return: (projectno, number_list) — the original list is passed back
             unchanged for the caller's convenience
    """
    prefix_chars = []
    for column in zip(*number_list):
        if len(set(column)) != 1:
            break
        prefix_chars.append(column[0])
    return "".join(prefix_chars)[:length], number_list


def get_number(response, length, text_xpath=""):
    """
    Public entry point: extract candidate number strings from the page and
    reduce them to a common-prefix project number.

    :return: (projectno, number_list)
    """
    candidates = get_all_nbumber(response, text_xpath)
    return get_projectno(candidates, length)

def judge_xpath( res,xpaths):
    """
    Return the first xpath in `xpaths` that matches a real element in
    `res`, or "" when none do.

    :param res: HTML text
    :param xpaths: iterable of candidate xpath expressions
    """
    for candidate in xpaths:
        if parse_xpath(res, candidate):
            return candidate
    return ""

def parse_xpath( res, xpath):
    """
    Check whether an xpath selects a real element in the page.

    :param res: HTML text
    :param xpath: xpath expression to probe
    :return: True when the xpath matches at least one node whose
             serialized form contains markup, else False
    """
    html_element = etree.HTML(res)
    result = html_element.xpath(xpath)
    # Fix: removed the stray debug print(result) that polluted stdout, and
    # renamed the local `html` which shadowed the lxml `html` import.
    if len(result) >= 1:
        # Plain text nodes serialize without angle brackets.
        fragment = etree.tostring(result[0], encoding="utf-8").decode("utf-8")
        if "<" in fragment and ">" in fragment:
            return True
    return False

# Decide whether the detail page still needs xpath/snapshot work.
def judge_xpath_err( text_xpath, result, weather_text_xpath=True):
    """
    Decide the (xpath_err, image_status) pair for a crawled detail page.

    When weather_text_xpath is True and no text_xpath was found, the page
    is flagged as an xpath error outright; otherwise the decision is
    delegated to judge_xpath_err2.

    :param text_xpath: body xpath found for the page ("" when none)
    :param result: the crawled document
    :param weather_text_xpath: whether the text_xpath presence matters
    :return: (xpath_err, image_status)
    """
    if weather_text_xpath and not text_xpath:
        return 1, 0
    image_status, xpath_err = judge_xpath_err2(result)
    return xpath_err, image_status

def judge_xpath_err2( result):
    """
    Decide (image_status, xpath_err) from a result document.

    A snapshot check (judge_SnapShot) is required only for winning-bid
    notices ("中标公告") and — when an industry is set — only for the
    construction / government-procurement industries. Every other page
    needs neither, yielding (0, 0).

    :param result: document with "industry" and "ifbprogress" keys
    :return: (image_status, xpath_err)
    """
    industry = result["industry"]
    ifbprogress = result["ifbprogress"]
    needs_snapshot = (
        ifbprogress == "中标公告"
        and (not industry or industry in ("工程建设", "建设工程", "政府采购"))
    )
    if needs_snapshot:
        return judge_SnapShot(result)
    return 0, 0

def judge_SnapShot( result):
    """
    Check whether the document already has a snapshot.

    :param result: crawled document (SnapShot key optional)
    :return: (2, 0) when a non-empty SnapShot exists, (0, 1) otherwise
    """
    try:
        snapshot = result["SnapShot"]
    except Exception:
        snapshot = ""
    if snapshot:
        return 2, 0
    return 0, 1

def create_indexes(index_status,db_conn):
    """
    Ensure the collection's standard indexes exist.

    Scans existing index names once (substring match, in the original
    priority order), then creates only the missing ones: a unique
    originalurl index, status, two industry/SnapShot/ifbprogress combos,
    update_time, title/ifbprogresstag/html_id, and is_html/cz.

    :param index_status: when falsy, all work is skipped
    :param db_conn: Mongo collection object
    :return: False — callers use the return value to reset their flag
    """
    if index_status:
        missing = {key: True for key in
                   ("originalurl", "status", "industry", "update_time", "html_id", "cz")}
        for index in db_conn.list_indexes():
            name = index["name"]
            # originalurl/status keep their short-circuit `continue` from the
            # original so later substring checks never see those names.
            if "originalurl" in name:
                missing["originalurl"] = False
                continue
            if "status" in name:
                missing["status"] = False
                continue
            if "industry" in name:
                missing["industry"] = False
            if "update_time" in name:
                missing["update_time"] = False
            if "html_id" in name:
                missing["html_id"] = False
            if 'cz' in name:
                missing["cz"] = False

        if missing["originalurl"]:
            db_conn.create_index([("originalurl", 1)], unique=True, background=True)
        if missing["status"]:
            db_conn.create_index([("status", 1)], background=True)
        if missing["industry"]:
            db_conn.create_index([("industry", 1), ("SnapShot", 1), ("ifbprogress", 1)], background=True)
            db_conn.create_index([("industry", 1), ("SnapShot", 1), ("ifbprogress", 1), ("image_status", 1)], background=True)
        if missing["update_time"]:
            db_conn.create_index([("update_time", 1)], background=True)
        if missing["html_id"]:
            db_conn.create_index([("title", 1), ("ifbprogresstag", 1), ("html_id", 1)], background=True)
        if missing["cz"]:
            db_conn.create_index([("is_html", 1), ("cz", 1)], background=True)
    return False

def create_index_mongo(db_conn_obj,*args):
    '''
    Create missing single- or multi-field indexes on a collection.

    :param db_conn_obj: Mongo collection object
    :param args: one tuple of field names per index to ensure
    :return: list of index names actually created

    Bug fix: the original tested `index_name not in db_conn_obj.list_indexes()`,
    i.e. a string against a cursor of index DOCUMENTS — it never matched (and
    the cursor was consumed after the first argument), so existing indexes
    were re-created on every call. We now collect the `name` field of each
    existing index into a set first.
    '''
    existing = {idx["name"] for idx in db_conn_obj.list_indexes()}
    create_list = []  # names of the indexes we actually created
    for one_tuple in args:
        try:
            if not isinstance(one_tuple, tuple):
                print(f'请传入元组类型参数  info::{one_tuple}')
                continue
            if not one_tuple:
                continue
            # Mongo-style name: each field suffixed with _1, joined by _.
            index_name = '_'.join(str(field) + '_1' for field in one_tuple)
            if index_name in existing:
                continue
            db_conn_obj.create_index([(field, 1) for field in one_tuple])
            existing.add(index_name)
            create_list.append(index_name)
        except Exception as err:
            if 'already exists with different options' in str(err):
                logger.debug(f'索引已存在 index::{one_tuple}')
                continue
            logger.error(f'创建索引错误 info::{err}  index::{one_tuple}')
    return create_list

def get_cookie(url):
    """
    Fetch a page with headless Chrome and return its cookies as a single
    "name=value; " string (used to obtain session cookies such as
    JSESSIONID).

    :param url: page to visit
    :return: concatenated cookie string
    """
    # Pick the bundled chromedriver per platform (the 'win' substring test
    # is kept exactly as the original wrote it).
    if 'win' in sys.platform:
        driver_path = r'E:\tender_1\tender_project\chromedriver.exe'
    else:
        driver_path = "/root/chromedriver"
    opts = Options()
    for flag in ('headless', '--disable-gpu', '--no-sandbox',
                 '--disable-dev-shm-usage', '--disable-infobars',
                 '--single-process', '--disk-cache-size=12428800',
                 '--log-level=3'):
        opts.add_argument(flag)
    opts.add_experimental_option('excludeSwitches', ['enable-logging'])
    driver = webdriver.Chrome(executable_path=driver_path, options=opts)
    driver.implicitly_wait(10)
    driver.get(url)
    raw_cookies = driver.get_cookies()
    driver.quit()
    return ''.join('%s=%s; ' % (c['name'], c['value']) for c in raw_cookies)