import datetime
import functools
import io
import logging
import os
import re
# from urlparse import urlsplit
from urllib.request import Request
from urllib.request import urlopen

import requests

from common import LoggerTool

# Module-level logger obtained from the project's logging helper.
logger = LoggerTool.get_logger(__name__)
# Request headers (browser User-Agent plus session cookies) for the
# proxied site. NOTE(review): the cookie values are hard-coded session
# tokens and will expire -- confirm how they are meant to be refreshed.
header_proxy = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
    'cookie': 'channelid=0; sid=1666434798691454; _ga=GA1.2.1190767748.1666436402; _gid=GA1.2.1521003120.1666436402; _gcl_au=1.1.1550814062.1666436402; Hm_lvt_7ed65b1cc4b810e9fd37959c9bb51b31=1666436402; Hm_lpvt_7ed65b1cc4b810e9fd37959c9bb51b31=1666436412'
}

# Default request headers; used by file_size() in this module.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',

    'cookie': '__utmz=42540256.1666436076.4.4.utmcsr=f0119.workarea5.live|utmccn=(referral)|utmcmd=referral|utmcct=/; CzG_visitedfid=19; CzG_sid=DxfigL; __utma=42540256.1762781628.1657932954.1666436076.1666786004.5; __utmc=42540256; __utmt=1; __utmb=42540256.1.10.1666786004'
}
# strftime template: ISO date plus the locale's time representation (%X).
default_time_format = '%Y-%m-%d %X'

# Headers for novel-site requests (note: "noval" is the original spelling,
# kept because other modules may import it by this name).
noval_header ={
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    'cookie':'H_WISE_SIDS_BFESS=61530_61522_61619; BAIDUID_BFESS=D126034FFFA34DCBC7D58114F710137D:FG=1; ZFY=DfHygksATy2syI7gcfwkp:AAGNCCXPqOeoJxOCtWzsQ8:C'
}
# Return the current user's home directory, terminated with a path separator.
def get_user_dir():
    """Return the user's home directory path ending in os.sep."""
    home = os.path.expanduser('~')
    return home + os.sep


# Strip characters that are unwanted/illegal in file names.
def replace_special_char(old_str):
    """Remove special characters from *old_str* and strip surrounding whitespace.

    Returns None unchanged when given None. (The previous version called
    .strip() on None in its else branch, which always raised AttributeError.)
    """
    if old_str is None:
        return None
    new_str = re.sub(r'<+|>+|/+|‘+|’+|\?+|\|+|"+|：+|:+|【+|】+|\.+|~+|\*+|\.\.\.+|\�+|�+|\？+|≈+|ot;+|&ap;+', '', old_str)
    return new_str.strip()


# Clean the name then truncate at the first '-' (used by the porn downloader).
def replace_sub(old_str):
    """Return the cleaned title, cut off at the first '-' character.

    Raises ValueError (from str.index) when the cleaned title has no '-'.
    """
    cleaned = replace_special_char(old_str)
    cut_at = cleaned.index('-')
    return cleaned[:cut_at]


# Clean the name then truncate at char_str (used by the porn downloader).
def replace_sub_common(old_str, char_str, isLast=False):
    """Return the cleaned title truncated at *char_str*.

    When isLast is True the cut is at the LAST occurrence (str.rfind);
    NOTE(review): rfind returns -1 when absent, which silently drops the
    final character -- preserved as-is since callers may rely on it.
    When isLast is False, str.index raises ValueError if absent.
    """
    cleaned = replace_special_char(old_str)
    if isLast:
        cut_at = cleaned.rfind(char_str)
    else:
        cut_at = cleaned.index(char_str)
    return cleaned[:cut_at]


def get_cur_dir():
    """Return the current working directory, terminated by os.sep."""
    return os.getcwd() + os.sep


def get_datetime(template):
    """Format the current local time with *template* (strftime syntax)."""
    now = datetime.datetime.now()
    return now.strftime(template)


def image_size(image_url):
    """Download the image at *image_url* and return its size in kB (len/1000).

    The old io.BytesIO(image).read() round-trip copied bytes that were
    already fully in memory; len() on the downloaded content is identical.
    Logged via logger.info for consistency with file_size().
    NOTE(review): requests.get has no timeout here -- a stalled server
    blocks the caller indefinitely; confirm whether one should be added.
    """
    content = requests.get(image_url).content
    size = len(content) / 1000
    logger.info(size)
    return size


def file_size(url):
    """Download *url* (with the module's default headers) and return its size in kB.

    Uses a context manager so the HTTP response is always closed; the old
    version leaked the connection returned by urlopen().
    """
    request = Request(url, headers=header)
    with urlopen(request) as response:
        body = response.read()
    size = len(body) / 1000
    logger.info(size)
    return size


# Thread-pool completion callback: record any exception the worker raised.
def executor_callback(worker):
    """Log a finished future's exception (if any) to a dated error file.

    The old code read worker_exception.request.url unconditionally, so the
    error handler itself crashed with AttributeError for any exception type
    that lacks a .request (only requests' exceptions carry one).
    """
    worker_exception = worker.exception()
    if not worker_exception:
        return
    # Fall back to a placeholder when the exception carries no request URL.
    request = getattr(worker_exception, 'request', None)
    failed_url = getattr(request, 'url', '<unknown>')
    # Append to a per-day error log next to the download directory.
    with open('down_error_' + get_datetime('%Y-%m-%d') + '.log', 'a+', encoding='utf-8') as f:
        f.write('%s ; url:{%s} 失败 ; 失败原因：%s\n' % (
            get_datetime('%Y/%m/%d %H:%M:%S'), failed_url, worker_exception.args))
    logger.exception("request:path_url:{}".format(failed_url))


# Decorator for worker threads: run the function, report expected errors
# via print instead of letting them kill the thread.
def exception_handler(func):
    """Wrap *func* so TypeError/TimeoutError/other exceptions are swallowed.

    Fixes over the old version: the wrapped function's return value is now
    propagated (it was silently dropped), the bare `except:` is narrowed to
    `except Exception` (no longer traps KeyboardInterrupt/SystemExit), and
    functools.wraps preserves the wrapped function's name and docstring.
    Returns None when an exception was swallowed.
    """
    @functools.wraps(func)
    def inner_function(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            print(f"{func.__name__} only takes numbers as the argument")
        except TimeoutError:
            print(f"{func.__name__} timeout")
        except Exception:
            print(f"{func.__name__} oe")

    return inner_function


import multiprocessing as mp


def my_handle(target, args_list):
    """Spawn one process per tuple in *args_list*, each running *target*.

    Before spawning, a per-process logger (keyed by the tuple's first
    element) is configured with a FileHandler writing to '<id>.log'.

    NOTE(review): the processes are started but never joined, and
    mp.set_start_method() raises RuntimeError if a start method was
    already set in this interpreter -- confirm this helper is only
    called once per run.
    """
    # configure logs
    for args in args_list:
        logger_id = args[0]  # first arg suffices to id a process, in my case
        # This rebinds `logger` locally; the module-level logger is untouched.
        logger = logging.getLogger(logger_id)
        handler = logging.FileHandler(logger_id + '.log')
        logger.setLevel(logging.INFO)
        logger.addHandler(handler)

    mp.set_start_method('spawn')  # bug fix, see below

    # build each process
    for args in args_list:
        p = mp.Process(target=target, args=args)
        p.start()


# De-duplicate a list while keeping the original element order.
def list_distinct(old_list):
    """Return a new list with duplicates removed, preserving first-seen order.

    dict keys preserve insertion order (Python 3.7+), giving the same
    result as the old set() + sort(key=old_list.index) approach but in
    O(n) instead of O(n^2).
    """
    return list(dict.fromkeys(old_list))


def del_old_Undown_Text(file_dir):
    """Delete every '*未下载.text' (not-yet-downloaded marker) file under *file_dir*.

    The old version ran the delete loop INSIDE os.walk's directory loop, so
    files collected from an earlier directory were os.remove()d again on every
    later iteration, raising FileNotFoundError whenever matches spanned more
    than one directory. Collect first, then delete once after the walk.
    """
    file_list = []
    for root, dirs, files in os.walk(file_dir):
        for file in files:
            if file.endswith('未下载.text'):
                file_list.append(os.path.join(root, file))

    # Delete all collected marker files exactly once.
    for f in file_list:
        print('删除***未下载.text:' + f)
        os.remove(f)


# Get the project root path.
def get_project_dir(project_name='DownPython'):
    """Return the project root (path up to and including *project_name*), ending in os.sep.

    Generalized from the old hard-coded "DownPython\\" literal: the marker is
    now a defaulted parameter and uses os.sep, so it also works on POSIX.
    When the marker is not in the current working directory path, the cwd
    itself is returned (the old code returned a meaningless slice because
    find() == -1 fed straight into the slice bound).
    """
    cur_dir = os.getcwd() + os.sep
    marker = project_name + os.sep
    pos = cur_dir.find(marker)
    if pos == -1:
        return cur_dir
    return cur_dir[:pos + len(marker)]

if __name__ == '__main__':
    # Smoke test: clean a sample paragraph and show the result.
    # Renamed the variable from `str`, which shadowed the builtin, and
    # print the cleaned text (the old code computed it then called print()
    # with no argument, showing nothing).
    sample_text='妻子的双手放在小郑的屁股上，用力往自己身上拉，仿佛在帮助小郑顶得更深。&ap;ot;芳姐，想不到你在床上这么骚，我要干死你这骚隆&ap;ot;小郑的话越来越粗鲁，却似乎更刺激了妻子。她猛得用力坐起来，将小郑推倒，小小郑也一下被挤出了体外。小郑还没明白过来，妻子已坐在了他双腿间，匆匆扶住那滑腻的坚挺急不可耐的坐了下去。&ap;ot;呃――&ap;ot;随着小郑的再一次深入，妻子一声长哼，她的双手撑在小郑胸上，臀部含住小小郑有节奏的一前一后磨着，低着头，任由长发垂在小郑胸前。妻子的忽然主动让小郑有些诧异，又有些激动，他一边享受着身体上熟妇的蠕动，一边抓住妻子的双乳，揉捏着，弄的妻子娇喘嘘嘘，情不自禁的捧住小郑的脸，又主动吻了上去。再一个长吻后，妻子坐在小郑身上，长发将两人的头一起遮住，媚眼如丝的凝视着小郑。小郑看着身上已熟透的少妇，下体轻轻一用力，少妇邹着眉哼了一声，却将身体往自己头上探出，竟是主动将双乳凑到他的口前。小郑惊喜的将她微微垂下的乳头一口含下，另一手握住不断在手中变幻着她乳房的形状，下体开始加力。妻子一边享受找乳头的刺激，一边承受起下体内越来越积累的瘙痒，仰头挺胸，紧咬下唇。她感觉到身体内的火越来越往，开始四处挤压，仿佛在寻找一个点迸发。她的臀有节奏的迎合着小郑的本章未完，请翻开下方下一章继续阅读'
    char = replace_special_char(sample_text)
    print(char)
    