from requests.exceptions import ChunkedEncodingError, SSLError, ConnectionError

from crawler import config
import datetime
import time
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
import requests
from bs4 import BeautifulSoup
import random
import file_md5.FilePrint as file_print
import os
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium import webdriver

# Default request headers (desktop Firefox UA) for plain page fetches.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Root directory that downloaded images are written under.
path = config.generalConfig.output_path
# Location of the Chrome browser binary.
chrome_location = r'D:\\Software\\Chrome\\chrome-win64_131\\chrome.exe'
# Chrome user-data (profile) directory, reused by Selenium sessions.
user_data_dir = r'C:\Users\Administrator\AppData\Local\Google\Chrome\User Data\Default'

# Logger initialisation (currently disabled).
# log = Logger("D:/workspace/logs/crawler.log", level='info')
# Silence urllib3's InsecureRequestWarning (requests below use verify=False).
requests.packages.urllib3.disable_warnings()
# Headers used for image downloads in img_download().
http_headers = {'User-Agent' :
                      'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'}
def get_random_user_agent():
    """Return one User-Agent string picked at random from a fixed pool.

    The pool mixes desktop, mobile, and legacy browser identifiers so that
    repeated requests do not present a single fingerprint.
    """
    agents = (
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
        "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
        "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
        "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
        "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
        "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
        "UCWEB7.0.2.37/28/999",
        "NOKIA5700/ UCWEB7.0.2.37/28/999",
        "Openwave/ UCWEB7.0.2.37/28/999",
        "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
        # iPhone 6:
        "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
    )
    return random.choice(agents)

def read_urls():
    """Read the default seed file and return its whitespace-separated URLs.

    NOTE(review): this zero-argument definition is shadowed by the
    read_urls(str) defined immediately below it, so callers actually get
    that one. Kept for reference; confirm whether it can be removed.
    """
    # FIX: the original leaked the file handle; use a context manager.
    with open(config.generalConfig.feed_file) as seed_file:
        content = seed_file.read()

    # One URL per whitespace-separated token.
    urls = content.split()
    print(len(urls))
    return urls
def read_urls(source='default'):
    """Read the URL seed file for *source* and return its URLs as a list.

    Args:
        source: feed selector - 'toutiao', 'weixin', or '小红书'; any other
            value (including the default) selects the general feed file.
            (Renamed from `str`, which shadowed the builtin; positional
            callers are unaffected. The default value also keeps the
            zero-argument call pattern of the shadowed definition above
            working.)

    Returns:
        List of URL strings, one per whitespace-separated token.
    """
    if source == 'toutiao':
        feed_path = config.generalConfig.toutiao_feed_file
    elif source == 'weixin':
        feed_path = config.generalConfig.weixin_feed_file
    elif source == '小红书':
        feed_path = config.generalConfig.little_red_book_file
    else:
        feed_path = config.generalConfig.feed_file

    # FIX: the original never closed the file; use a context manager.
    with open(feed_path) as seed_file:
        content = seed_file.read()

    urls = content.split()
    print(len(urls))
    return urls

def get_prefix():
    """Return the current local time as a 'YYYYMMDDHHMMSS_' prefix string."""
    return datetime.datetime.now().strftime('%Y%m%d%H%M%S_')


def readSeedFile(path):
    """Read the UTF-8 text file at *path* and return its entire contents."""
    with open(path, mode='r', encoding='UTF-8') as seed_file:
        content = seed_file.read()
    return content


def getExtention(item):
    """Derive an image extension from the last '/'-separated segment of *item*.

    A segment equal to 'webp', or longer than four characters (i.e. not a
    bare extension), falls back to 'jpg'.
    """
    tail = item.rsplit("/", 1)[-1]
    if tail == 'webp' or len(tail) > 4:
        return 'jpg'
    return tail

def find_second_occurrence(parent_str, sub_str):
    """Return the index of the second occurrence of *sub_str* in *parent_str*.

    Returns -1 when *sub_str* occurs fewer than two times.
    """
    first = parent_str.find(sub_str)
    if first < 0:
        return -1
    # Resume the search just past the first match.
    return parent_str.find(sub_str, first + len(sub_str))

def mkdir(path):
    """Create directory *path* (and any missing parents) if absent.

    Leading/trailing whitespace and a trailing backslash are stripped
    from *path* first.

    Returns:
        True if the directory was created, False if it already existed.
    """
    # Normalise: drop surrounding whitespace and any trailing "\" separator.
    path = path.strip().rstrip("\\")

    if not os.path.exists(path):
        os.makedirs(path)
        # FIX: the original had Python-2 style bare `print` statements,
        # which print nothing in Python 3 and discarded the message
        # expression on the next line; print the intended message.
        print(path + ' 创建成功')
        return True
    else:
        print(path + ' 目录已存在')
        return False

def file_extension(path):
    """Return the extension of *path* (dot included), mapping '.webp' to '.jpg'.

    Downloads are saved as JPEG, so WebP sources get a '.jpg' extension.
    """
    # FIX: the original called os.path.splitext twice; compute it once.
    extension_name = os.path.splitext(path)[1]
    if extension_name == '.webp':
        return '.jpg'
    return extension_name

def getContext(url):
    """Fetch *url* over HTTP (TLS verification off) and return the parsed
    BeautifulSoup tree of the response body, decoded as UTF-8.
    """
    response = requests.get(url, headers=headers, verify=False)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'lxml')

def getContextAsSelenium(url):
    """Render *url* in headless Chrome via Selenium and return the parsed
    BeautifulSoup tree of the fully-loaded page.

    A random User-Agent is picked per call, and a long random wait
    (50-100 s) lets dynamically-loaded content settle before the page
    source is captured.
    """
    user_agent = get_random_user_agent()
    chrome_options = Options()
    # FIX: '-binary=...' is not a recognised Chrome switch; the browser
    # binary must be set through Options.binary_location instead.
    chrome_options.binary_location = chrome_location
    chrome_options.add_argument('user-agent={0}'.format(user_agent))
    # Headless / CI-friendly flags.
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('window-size=10,10')
    chrome_options.add_argument('--disable-gpu')  # avoids flaky element location
    chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip image loads
    # Reuse the local Chrome profile; requires other Chrome instances to be closed.
    chrome_options.add_argument(f'--user-data-dir={user_data_dir}')
    service = ChromeService(executable_path=r"D:\Software\Chrome\chromedriver-win64_131/chromedriver.exe")

    driver = webdriver.Chrome(service=service, options=chrome_options)
    try:
        driver.get(url)
        # Random long wait so dynamic content finishes loading.
        time.sleep(random.randint(50, 100))
        html = driver.page_source
    finally:
        # FIX: the original never quit the driver, leaking a Chrome
        # process per call; always shut the browser down.
        driver.quit()
    soup = BeautifulSoup(html, 'lxml')
    return soup


def img_download(download_links, title, date, parent_url, logger, offset=0, abbr_url=''):
    """Download each image URL in *download_links* into the configured
    output directory and record a fingerprint row per image in the DB.

    Duplicate images (same fingerprint already in the DB) are kept on disk
    but renamed with a "_duplicated" suffix and not re-recorded.

    Args:
        download_links: iterable of image URLs.
        title: album/page title, used as the target directory name.
        date: optional date string prefixed to the directory; None to omit.
        parent_url: page the images came from (stored with each record).
        logger: project logger wrapper exposing a `.logger` attribute.
        offset: resume point - items with 1-based index <= offset are skipped.
        abbr_url: abbreviated parent URL stored alongside parent_url.
    """
    db_session = DBOperator()
    logger.logger.warn("Url Name is " + parent_url)
    i = 1
    size = len(download_links)
    for item in download_links:
        # Resume support: skip everything up to and including `offset`
        # (offset = index of the item that last failed).
        if i < offset + 1:
            i = i + 1
            continue
        retry = True
        while retry:
            try:
                res = requests.get(item, headers=http_headers, timeout=(10, 30))
                print(str(i) + "/" + str(size) + ": " + item)
                if date is None:
                    tmp_dir = path + title + "/"
                else:
                    tmp_dir = path + date + title + "/"
                print(tmp_dir)
                mkdir(tmp_dir)
                download_file_name = tmp_dir + str(i).zfill(3) + file_extension(item)
                # FIX: the original closed the file by hand inside the
                # `with` block and built the DB record while still inside
                # it; the context manager alone is sufficient.
                with open(download_file_name, 'wb') as f:
                    f.write(res.content)

                # Fingerprint the downloaded file for duplicate detection.
                img_info = ImageInfo()
                img_info.url = item
                try:
                    img_info.image_print = file_print.get_img_print(download_file_name)
                except OSError as e:
                    print(f"处理图像时出错: {e} - 可能文件不完整或损坏。")

                d_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                img_info.crawled_time = d_time
                img_info.parent_url = parent_url
                img_info.abbr_parent_url = abbr_url
                retry = False
                break
            except ChunkedEncodingError:
                retry = True
                print("HTTP Exception Envolved!")
                logger.logger.warn("HTTP Exception Envolved!")
            except SSLError:
                retry = True
                print("SSL Exception Envolved!")
                logger.logger.warn("SSL Exception Envolved!")
            except ConnectionError:
                retry = True
                print("ConnectionError Exception Envolved!")
                logger.logger.warn("ConnectionError Exception Envolved!")
            except requests.exceptions.Timeout:
                retry = True
                print("Download Timeout Exception Envolved!")
                logger.logger.warn("Download Timeout Exception Envolved!")
        time.sleep(1)
        result = db_session.check_image_info(img_info)
        if len(result) > 0:
            # Duplicate fingerprint: keep the file but mark it as duplicated.
            logger.logger.warn(
                img_info.parent_url + " with image_print: " + str(img_info.image_print) + "have duplicated!")
            logger.logger.warn("File Name is " + download_file_name)
            try:
                os.rename(download_file_name, download_file_name[:download_file_name.rfind(".")] + "_duplicated" +
                          file_extension(item))
            except FileExistsError:
                logger.logger.warn(
                    str(download_file_name) + " are exist, no need to download!")
                # NOTE(review): this `break` aborts the whole download loop,
                # not just this item - preserved as-is; confirm intended.
                break
            i = i + 1
            continue
        db_session.add_image_info(img_info)
        i = i + 1