import pymysql
import os
from urllib import request
from urllib.parse import quote
import string
import re
import time
import random
import threading
from concurrent.futures import ThreadPoolExecutor

# mysql操作工具类
# https://www.cnblogs.com/iruance/p/14364894.html
# https://www.cnblogs.com/liu-ke/p/5092391.html

# 下载图片
# https://www.cnblogs.com/liu-ke/p/5092391.html

# 图片下载
# MySQL helper: opens one shared connection + cursor at construction time.
# NOTE(review): the connection and cursor are stored in module-level globals
# rather than instance attributes because other code in this file accesses
# `conn`/`cursor` as globals directly — keep that contract.
class mysqlHelper:
    def __init__(self):
        """Connect to the database immediately on construction."""
        self.init_mysql()

    def init_mysql(self):
        """Open the MySQL connection and a DictCursor, stored as globals."""
        try:
            global conn
            global cursor
            conn = pymysql.connect(
                host="127.0.0.1",
                user="root",
                db="caomei",
                passwd="123456",
                charset="utf8",
                use_unicode=True
            )

            # DictCursor: rows come back as dicts keyed by column name
            cursor = conn.cursor(pymysql.cursors.DictCursor)

        except Exception as e:
            print("db connect error：", e)

    def select(self, sql):
        """Execute a SELECT and return all rows as a tuple of dicts.

        Returns an empty tuple on error (instead of None) so callers that
        call len() on the result or iterate it do not crash when the
        query fails.
        """
        try:
            cursor.execute(sql)  # run the query through the shared cursor

            return cursor.fetchall()
        except Exception as e:
            print("select error", e)
            return ()

    def get_conn(self):
        """Return the shared connection object."""
        return conn

    def get_cursor(self):
        """Return the shared cursor object."""
        return cursor

    def close(self):
        """Close the cursor, then the connection (in that order)."""
        if cursor:
            cursor.close()

        if conn:
            conn.close()


if __name__ == "__main__":

    db = mysqlHelper()

    # Special characters to match.
    # NOTE(review): appears unused here — get_special_char_pattern() defines
    # its own copy of this set; confirm before deleting.
    special_chars = '!@#$%^&*(){}[]<>?/\|&#126;`？，。,.'

    # Ids pending batch update.
    # NOTE(review): shadowed by the local list created in download_img();
    # this module-level one is likely dead — confirm before deleting.
    update_id_list = []  # 待更新的id列表

    # Rotated per-request to vary the User-Agent header (anti-scraping).
    USER_AGENTS = [
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    ]

    # Pool of proxies, keyed by URL scheme; one is chosen at random per
    # request (anti-scraping). NOTE(review): these are public proxies and
    # may well be dead by now.
    PROXY_LIST = {
        'http': ['http://183.207.95.27:80', 'http://111.6.100.99:80', 'http://122.72.99.103:80',
                 'http://106.46.132.2:80', 'http://112.16.4.99:81', 'http://123.58.166.113:9000',
                 'http://118.178.124.33:3128', 'http://116.62.11.138:3128', 'http://121.42.176.133:3128',
                 'http://111.13.2.131:80', 'http://111.13.7.117:80', 'http://121.248.112.20:3128',
                 'http://112.5.56.108:3128', 'http://42.51.26.79:3128', 'http://183.232.65.201:3128',
                 'http://118.190.14.150:3128', 'http://123.57.221.41:3128', 'http://183.232.65.203:3128',
                 'http://166.111.77.32:3128', 'http://42.202.130.246:3128', 'http://122.228.25.97:8101',
                 'http://61.136.163.245:3128', 'http://121.40.23.227:3128', 'http://123.96.6.216:808',
                 'http://59.61.72.202:8080', 'http://114.141.166.242:80', 'http://61.136.163.246:3128',
                 'http://60.31.239.166:3128', 'http://114.55.31.115:3128', 'http://202.85.213.220:3128'],
        'https': ['https://183.207.95.27:80', 'https://111.6.100.99:80', 'https://122.72.99.103:80',
                  'https://106.46.132.2:80', 'https://112.16.4.99:81', 'https://123.58.166.113:9000',
                  'https://118.178.124.33:3128', 'https://116.62.11.138:3128', 'https://121.42.176.133:3128',
                  'https://111.13.2.131:80', 'https://111.13.7.117:80', 'https://121.248.112.20:3128',
                  'https://112.5.56.108:3128', 'https://42.51.26.79:3128', 'https://183.232.65.201:3128',
                  'https://118.190.14.150:3128', 'https://123.57.221.41:3128', 'https://183.232.65.203:3128',
                  'https://166.111.77.32:3128', 'https://42.202.130.246:3128', 'https://122.228.25.97:8101',
                  'https://61.136.163.245:3128', 'https://121.40.23.227:3128', 'https://123.96.6.216:808',
                  'https://59.61.72.202:8080', 'https://114.141.166.242:80', 'https://61.136.163.246:3128',
                  'https://60.31.239.166:3128', 'https://114.55.31.115:3128', 'https://202.85.213.220:3128']
    }

    def start_download(target_link="https://www.ikanhm.xyz/book/613"):
        """Kick off downloading for one comic.

        target_link: comic page URL used to select the pending rows.
        Defaults to the original hard-coded comic, so existing zero-argument
        callers are unchanged.
        """
        select_download_data(target_link)

    def select_download_data(target_link):
        """Repeatedly select up to 100 not-yet-downloaded images for the
        comic and download them, until no pending rows remain.

        Times the whole run, and closes the shared cursor/connection when
        the loop exits (even on error).
        """
        start = time.time()

        if not target_link:
            print("target link empty!")
            return

        # NOTE(review): target_link is concatenated straight into the SQL.
        # Today it is a hard-coded constant, but this is injectable if it
        # ever comes from user input — switch to a parameterized query then.
        select_sql = "SELECT hm.`id`, hm.`comic_link`, hm.`comic_name`, hm.`chapter_link`, hm.`chapter_name`, hm.`img_link`, hm.`img_name`, hm.`img_sufix` FROM `hanman` hm WHERE hm.`download` = 0 AND hm.comic_link = '" + target_link + "' LIMIT 100"

        try:
            while True:
                img_info_list = db.select(select_sql)

                # db.select may yield an empty (or, historically, None)
                # result on error — stop instead of crashing on len(None).
                if not img_info_list:
                    break

                download_img(img_info_list)
        finally:
            # These are the globals set up by mysqlHelper.init_mysql().
            cursor.close()
            conn.close()

        end = time.time()
        print("time consuming", end - start, " s!")

    def download_img(img_info_list):
        """Download one batch of images on a thread pool and mark the
        corresponding rows as downloaded.

        img_info_list: non-empty sequence of row dicts from the `hanman`
        table (the comic_name is assumed identical on every row).
        """
        update_sql = "UPDATE `hanman` SET `download` = %s, `update_time` = NOW() WHERE `id` = %s"

        # Root save directory
        root_dir = "D:\\file\\my"

        # Every row in the batch belongs to the same comic; take the first.
        comic_name = img_info_list[0]['comic_name']
        print("comic name", comic_name)

        # Build <root>\<comic>\ in one call: makedirs creates intermediate
        # directories and exist_ok avoids the check-then-create race the
        # old exists()/mkdir() pairs had.
        comic_dir = root_dir + '\\' + comic_name + '\\'
        os.makedirs(comic_dir, exist_ok=True)

        conn = db.get_conn()
        cursor = db.get_cursor()
        update_id_list = []

        # NOTE(review): one cursor/connection is shared by all 10 workers;
        # pymysql connections are not thread-safe — confirm this is
        # acceptable or funnel the UPDATEs through a single thread/lock.
        with ThreadPoolExecutor(10) as thread_pool:
            for img_info in img_info_list:
                thread_pool.submit(download_img_detail, conn, cursor, img_info,
                                   comic_dir, update_sql, update_id_list)
        # leaving the `with` block waits for every submitted download

        # Flush the tail of ids (<100) that the workers did not flush.
        # Each element is a tuple matching update_sql's placeholders,
        # e.g. [(1, '1'), (1, '2')].
        if update_id_list:
            cursor.executemany(update_sql, update_id_list)
            conn.commit()

        print("less than 100! batch update success!")


    def download_img_detail(conn, cursor, img_info, comic_dir, update_sql, update_id_list):
        """Download one image through a random proxy and record its id for
        the batched 'downloaded' UPDATE.

        Runs on a worker thread; conn/cursor/update_id_list are shared with
        the other workers (see the NOTE in download_img).
        """
        chapter_name = delete_special_char(img_info['chapter_name'])

        # Per-chapter directory
        chapter_dir = comic_dir + chapter_name

        if not os.path.exists(chapter_dir):
            try:
                os.mkdir(chapter_dir)
            except Exception as e:
                # Another worker may create it between the check and the
                # mkdir; log and carry on.
                print("create dir", chapter_dir, "error", e)

        img_link = img_info['img_link']

        # --- anti-scraping: random proxy + random user agent ---
        http_type = img_link.split('://')[0]  # "http" or "https"

        # BUG FIX: the proxy mapping key must be the bare scheme ("http"),
        # not a quoted string ("'http'") — with the quoted key the scheme
        # never matched and the proxy was silently unused.
        handler = request.ProxyHandler({http_type: random.choice(PROXY_LIST[http_type])})

        opener = request.build_opener(handler)

        # BUG FIX: addheaders expects a list of (name, value) tuples;
        # the original assigned a set, so no User-Agent was ever sent.
        opener.addheaders = [("User-Agent", random.choice(USER_AGENTS))]

        # Install globally so the request.urlopen call below uses it.
        # NOTE(review): install_opener mutates process-wide state from
        # multiple threads — confirm this race is acceptable.
        request.install_opener(opener)
        # --- end anti-scraping ---

        # Percent-encode non-ASCII path characters; printable ASCII is safe.
        url = quote(img_link, safe=string.printable)
        img_file = request.urlopen(url, timeout=10)
        byte = img_file.read()

        img_name = img_info['img_name']
        img_sufix = img_info['img_sufix']

        print("img name", img_name, "download complete! size:", len(byte) / 1024, "kb")

        # 'ab' = binary append (deliberate in the original).
        # NOTE(review): re-downloading an existing image appends duplicate
        # bytes to the file; 'wb' (overwrite) is probably the safer intent —
        # confirm before changing.
        with open(chapter_dir + '\\' + img_name + '.' + img_sufix, 'ab') as out_file:
            out_file.write(byte)

        print("img name", img_name, "save complete!")

        # Each tuple fills update_sql's (%s, %s) placeholders.
        update_id_list.append((1, str(img_info['id'])))

        # NOTE(review): this check/flush is not synchronized across the
        # worker threads and can misfire under contention — confirm or lock.
        if len(update_id_list) == 100:
            try:
                # The list is already a list of tuples, e.g. [(1, '1'), (1, '2')]
                cursor.executemany(update_sql, update_id_list)
                conn.commit()
                print("batch update success!")

                # BUG FIX: clear the shared list in place. The original
                # `update_id_list = []` only rebound the local name, so the
                # caller's list never emptied, the == 100 flush fired at most
                # once, and already-updated ids were re-sent at the end.
                update_id_list.clear()
            except Exception as e:
                print("batch update error", e)

    def get_special_char_pattern():
        """Return a compiled regex matching any single 'special' character
        that should be stripped from chapter names.

        BUG FIX: the original set contained the literal text '&#126;' — the
        HTML entity for '~' left over from a copy-paste — which wrongly made
        the digits 1, 2 and 6 count as special characters (stripping them
        from chapter names). It is replaced by the intended '~'. The
        backslash is also escaped explicitly instead of relying on the
        invalid '\\|' escape sequence.
        """
        special_chars = '!@#$%^&*(){}[]<>?/\\|~`？，。,.'

        # re.escape neutralises regex metacharacters so each one matches
        # literally inside the character class.
        escape_special_chars = re.escape(special_chars)

        # [...] matches any one character from the set.
        return re.compile(r'[%s]' % escape_special_chars)

    # Strip special characters from both ends of a chapter name.
    def delete_special_char(chapter_name):
        """Return chapter_name with every leading and trailing special
        character removed (interior ones are kept).

        BUG FIX: the original did a single left-to-right pass over the
        matches, checking startswith/endswith once per match, so a trailing
        run such as 'abc!?' was only partially stripped ('?' removed, '!'
        left behind). Stripping one character at a time until both ends are
        clean handles runs of any length and order.
        """
        pattern = get_special_char_pattern()

        # Strip from the front: pattern.match tests the first character.
        while chapter_name and pattern.match(chapter_name[0]):
            chapter_name = chapter_name[1:]

        # Strip from the back.
        while chapter_name and pattern.match(chapter_name[-1]):
            chapter_name = chapter_name[:-1]

        return chapter_name


    start_download()