import re
import os
import six
import json
import time
import execjs
import base64
import moment
import urllib
import hashlib
import dateparser
from urllib import parse
from urllib.parse import urlparse
from crawler.connection import redis_conn
from dateparser.search import search_dates
from crawler.utils.timeutils import TimeUtils
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta


# 分布式计数
def get_distribute_count(redis_key, info_list: list):
    """Round-robin pick from *info_list*, driven by a shared redis counter at *redis_key*."""
    position = redis_conn.incr(redis_key) % len(info_list)
    return info_list[position]


def deal_data(text):
    """
    Split *text* into lines, dropping blank/space-only lines and stripping the rest.

    :param text: multi-line string (e.g. " Blazers\nCashmere\n")
    :return: list of stripped, non-blank lines
    """
    return [line.strip() for line in text.split('\n') if line.replace(' ', '')]


def list_dict_remove_repeat(dict_list=None):
    """
    列表套字典去重 — deduplicate a list of dicts.

    Fix: the original body referenced an undefined global ``c`` and always
    raised NameError; the list is now an explicit parameter (default None,
    which yields an empty result, keeping zero-arg calls working).

    :param dict_list: list of dicts whose values are hashable
    :return: list of unique dicts (order not guaranteed)
    """
    if dict_list is None:
        return []
    unique_list = [dict(t) for t in {tuple(d.items()) for d in dict_list}]
    return unique_list


# 打开js文件，编译
def eval_js_code(filename, cwd=''):
    """Read the JS file at cwd+filename (utf8) and return the execjs-compiled context."""
    with open(cwd + filename, 'r', encoding='utf8') as source:
        return execjs.compile(source.read())


def file_operate(filename, method, cwd='', content=''):
    """
    Read or write a file under an optional directory.

    Fix: the three branches only differed in how ``cwd`` and ``filename``
    were joined, so the path is now computed once; the redundant
    ``'rb' in method`` check was dropped ('rb' already contains 'r').

    :param filename: file name
    :param method: open() mode; any mode containing 'r' reads, otherwise writes
    :param cwd: optional directory prefix, with or without a trailing '/'
    :param content: payload written in write modes
    :return: file contents for read modes, None for write modes
    """
    if not cwd:
        path = filename
    elif cwd.endswith('/'):
        path = cwd + filename
    else:
        path = cwd + '/' + filename
    with open(path, method) as f:
        if 'r' in method:
            return f.read()
        f.write(content)
        return None


def get_scrapy_response_cookies(response, cookie_name_list: list = None):
    """获取scrapy响应的cookies — extract named cookies from a scrapy response.

    Fix: the default ``cookie_name_list=None`` was iterated directly and
    raised TypeError; it now falls back to an empty list (empty result).

    :param response: scrapy response (``response.headers`` must support getlist)
    :param cookie_name_list: cookie names to extract; None -> none extracted
    :return: dict mapping each found cookie name to its value
    """
    if cookie_name_list is None:
        cookie_name_list = []
    cookies = dict()
    cookies_str = ''
    for cookie in response.headers.getlist("Set-Cookie"):
        cookie = cookie.decode()
        cookies_str += cookie
        # ' 1data ' is an arbitrary sentinel separating concatenated
        # Set-Cookie strings so the regexes below don't match across them
        cookies_str += ' 1data '

    for cookie_name in cookie_name_list:
        re_rule = cookie_name + '=(.*?);'
        result = re.search(re_rule, cookies_str)
        if result:
            cookies[cookie_name] = result.group(1)
    return cookies


def dict_to_query(query_item):
    """Serialize a dict into a URL-encoded form body ('k=v' pairs joined by '&')."""
    parts = []
    for key, value in query_item.items():
        encoded_key = parse.quote(key, safe='')
        # ints pass through unencoded; everything else is percent-encoded
        encoded_value = value if type(value) is int else parse.quote(value, safe='')
        parts.append(f'{encoded_key}={encoded_value}')
    return '&'.join(parts)


def query_to_dict(query_str):
    """Parse a URL-encoded form body into a dict.

    Fix: each pair is now split on the *first* '=' only, so a value that
    itself contains '=' (e.g. base64 padding) is no longer truncated, and
    fragments without any '=' are skipped instead of raising IndexError.

    :param query_str: form body, e.g. 'a=1&b=2'
    :return: dict of key -> value (values are raw, not unquoted)
    """
    query_item = dict()
    for pair in query_str.split('&'):
        key, sep, value = pair.partition('=')
        if not sep:
            continue  # fragment without '='
        query_item[key] = value
    return query_item


def get_env():
    """Deploy environment from the crawler_scm_trace_sea_env variable (None when unset)."""
    return os.environ.get('crawler_scm_trace_sea_env')


def timestamp():
    """Current unix time in milliseconds, as a decimal string."""
    millis = int(time.time() * 1000)
    return f'{millis}'


def get_time_stamp(time_str):
    """'YYYY-MM-DD[ HH:MM:SS]' string -> unix timestamp in seconds (local time)."""
    # presence of ':' distinguishes full datetime from date-only input
    fmt = "%Y-%m-%d %H:%M:%S" if ':' in time_str else "%Y-%m-%d"
    return int(time.mktime(time.strptime(time_str, fmt)))


def get_str_time(time_stamp):
    """Unix timestamp (seconds or milliseconds) -> 'YYYY-MM-DD HH:MM:SS' in local time."""
    if len(str(time_stamp)) == 13:  # 13 digits => milliseconds
        time_stamp = float(time_stamp / 1000)
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))


def generate_md5(code, length=None):
    """Hex MD5 digest of *code*, optionally truncated to *length* characters."""
    digest = hashlib.md5(code.encode('utf8')).hexdigest()
    return digest if length is None else digest[:length]


def merge_content(content_list):
    """Concatenate the items of *content_list* after stripping whitespace from each."""
    stripped = [fragment.strip() for fragment in content_list]
    return ''.join(stripped)


def response_extractor(response, rule):
    """Extract the first match of *rule* from a scrapy response, trying CSS then XPath."""
    try:
        # rules that are not valid CSS selectors raise here, triggering the fallback
        return response.css(rule).extract_first("").strip()
    except:
        return response.xpath(rule).extract_first("").strip()


def get_website_domain(url):
    """
    拿到网站域名:www.baidu.com,www.cpnn.com.cn等

    Extract the host name (port, credentials and path stripped) from an
    http(s) URL. Returns None for non-string or non-http input.

    Fix: the nested regex fallbacks mishandled URLs without a path but
    with a query (e.g. 'http://host?x=1' -> 'host?x=1') and used
    un-escaped '\\d' patterns; urlparse().hostname handles all cases.
    """
    if not isinstance(url, str):
        return
    url = url.lower()
    if not url.startswith('http'):
        return
    domain = urlparse(url).hostname
    if not domain:
        print('垃圾链接     {}'.format(url))
        domain = None
    return domain


# 获取7天内的时间字符串列表
def get_ago_time(days):
    """Date strings 'YYYY-MM-DD' for 1..days-1 days ago, most recent first.

    NOTE(review): range(1, days) yields days-1 entries and excludes today —
    presumably intentional; confirm with callers.
    """
    today = datetime.now()
    return [(today - timedelta(days=offset)).strftime('%Y-%m-%d')
            for offset in range(1, days)]


def parse_ago_time(day=7):
    """Unix timestamp (seconds) of local midnight *day* days ago (default 7).

    NOTE: a later definition in this module shadows this one and returns
    milliseconds instead.
    """
    target_day = date.today() - timedelta(days=day)
    return int(time.mktime(target_day.timetuple()))


def get_n_days_time(days=0):
    """'YYYY-MM-DD' string *days* days from today (negative = past, default today)."""
    target = datetime.now() + timedelta(days=int(days))
    return target.strftime('%Y-%m-%d')


def get_n_days_time_start(start=None, days=0):
    """'YYYY-MM-DD' string *days* days after *start* ('YYYY-MM-DD'; falsy -> today)."""
    base = datetime.strptime(start, "%Y-%m-%d") if start else datetime.now()
    return (base + timedelta(days=int(days))).strftime('%Y-%m-%d')


# 获取url参数
def url_query_to_dict(url):
    """Parse a URL's query string into a dict, keeping only the first value per key."""
    query = urllib.parse.urlparse(url).query
    return {key: values[0] for key, values in urllib.parse.parse_qs(query).items()}


def bytes_to_str(s, encoding="utf-8"):
    """Returns a str if a bytes object is given; other values pass through.

    Fix: dropped the six.PY3 guard — this module is Python 3 only (it
    imports urllib.parse), so the check was always true and the six
    dependency unnecessary here.
    """
    if isinstance(s, bytes):
        return s.decode(encoding)
    return s


def format_gmt_time(timestamp, gmt_hours=8):
    """时间戳GMT转换 — timestamp (s or ms) to a datetime string shifted by *gmt_hours*.

    NOTE(review): fromtimestamp already converts to the local timezone, so
    gmt_hours is applied on top of that — confirm the intended offset.
    """
    if len(str(timestamp)) == 13:  # milliseconds
        timestamp = int(timestamp / 1000)
    shifted = datetime.fromtimestamp(timestamp) + timedelta(hours=gmt_hours)
    return str(shifted)


def response_cookie_split_str(cookie_set):
    """Join Set-Cookie byte strings into a 'name=value; ' cookie header string."""
    return ''.join(raw.decode().split(';')[0] + '; ' for raw in cookie_set)


def response_cookie_split_dict(cookie_set):
    """Parse Set-Cookie byte strings into a {name: value} dict (attributes dropped)."""
    cookies = {}
    for raw in cookie_set:
        # first '; '-separated piece is the name=value pair; split on first '='
        name, value = raw.decode().split('; ')[0].split('=', 1)
        cookies[name] = value
    return cookies


def switch_cookie_dict_to_str(cookie_dict):
    """Render a cookie dict as a 'k=v; ' header string (trailing separator kept)."""
    return ''.join(f'{name}={value}; ' for name, value in cookie_dict.items())


def request_headers_split_dict(cookie_set):
    """将请求头的cookie拆分为字典 — parse cookie header byte strings into a dict.

    Example input: [b'visid=4ZNzZq...; incap_ses=e6az...; ']
    Fragments without '=' (e.g. the trailing empty piece) are skipped.
    """
    cookies = {}
    for raw in cookie_set:
        for fragment in raw.decode().split('; '):
            name, sep, value = fragment.partition('=')
            if not sep:
                continue
            cookies[name] = value
    return cookies


def clean_space_string(origin_string):
    """Strip surrounding whitespace and remove tab characters; falsy input passes through."""
    if not origin_string:
        return origin_string
    cleaned = origin_string.strip()
    for unwanted in ['\t']:
        cleaned = cleaned.replace(unwanted, '')
    return cleaned


def get_list_data(origin_list, index):
    """Element at *index*, or "" when the index is past the end (negative indexes pass through)."""
    return origin_list[index] if index <= len(origin_list) - 1 else ""


# 打开js文件，编译
def open_code(*args):
    """Compile a JS file with execjs; args[0] is a directory prefix, args[1] the filename."""
    with open(args[0] + args[1], 'r', encoding='utf8') as source:
        return execjs.compile(source.read())


def get_requests_proxies(proxy):
    """requests-style proxies dict for *proxy*, or None when proxy is falsy."""
    return {'http': proxy, 'https': proxy} if proxy else None


# 论坛专用
def get_request_pages(comment_num, page_comment_num, page_num):
    """根据系统page_num设定 — set of pages to fetch: the last *page_num* pages plus page 1.

    :param comment_num: total comment count (str or int)
    :param page_comment_num: comments per page
    :param page_num: max number of pages to fetch (beyond the mandatory page 1)
    """
    total_pages = int(int(comment_num) / page_comment_num + 1)
    if total_pages <= page_num:
        pages = list(range(1, total_pages + 1))
    else:
        # newest pages first, counting down page_num pages from the last
        pages = list(range(total_pages, total_pages - page_num, -1))
    pages.append(1)  # the first page is always requested
    return set(pages)


# md5加密
# md5加密
def md5(code, length=None):
    """Hex MD5 digest of *code* (utf8), optionally truncated to *length* characters."""
    hex_digest = hashlib.md5(code.encode('utf8')).hexdigest()
    return hex_digest if length is None else hex_digest[:length]


# 获取url参数
def url_query_to_dict(url):
    """Parse a URL's query string into a dict of first values (duplicate of the earlier definition)."""
    parsed_query = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
    return {key: values[0] for key, values in parsed_query.items()}


# cst格式时间转标准时间
# cst格式时间转标准时间
def cst_to_localtime(cst_time):
    """Convert a 'Fri Aug 28 10:00:00 +0800 2020'-style string to 'YYYY-MM-DD HH:MM:SS'."""
    parsed = time.strptime(cst_time, '%a %b %d %H:%M:%S +0800 %Y')
    return time.strftime('%Y-%m-%d %H:%M:%S', parsed)


# cst格式时间转标准时间戳
# cst格式时间转标准时间戳
def cst_to_localtime_stamp(cst_time):
    """CST string ('%a %b %d %H:%M:%S +0800 %Y') -> millisecond unix timestamp.

    The '+0%f' directive is a hack: it consumes the '+0800' zone by treating
    the trailing digits as microseconds, which struct_time then discards.
    """
    stamp = time.mktime(time.strptime(cst_time, '%a %b %d %H:%M:%S +0%f %Y'))
    if len(str(int(stamp))) == 10:  # seconds -> milliseconds
        stamp = int(stamp * 1000)
    return stamp


def parse_more_time(time_str):
    """Parse less-common time strings into a millisecond unix timestamp.

    Normalizes 年/月/日, '.' and '/' separators, prefixes today's date to a
    bare HH:MM, then parses via dateparser/search_dates with a moment-based
    fallback. Returns int milliseconds, or None when nothing parses.
    """

    def _search_dates(time_str):
        # Strict zh-language parse preferring past dates. search_dates
        # returns None on no match, so indexing below raises; callers of
        # this helper catch that and fall back.
        date_time = search_dates(time_str,
                                 languages=['zh'],
                                 settings={
                                     'DATE_ORDER': 'YMD',
                                     'STRICT_PARSING': True,
                                     'PREFER_LANGUAGE_DATE_ORDER': True,
                                     'PREFER_DATES_FROM': 'past'
                                 })
        return int(time.mktime(date_time[0][1].timetuple())) * 1000

    def _is_valid_hour_time(time_str):
        '''Whether time_str is a bare "HH:MM" clock time.'''
        try:
            time.strptime(time_str, '%H:%M')
            return True
        except:
            return False

    def _format_date(time_str):
        '''
            Normalize separators so the parsers below can handle the string:
            1. replace 年/月 with '-' and 日 with a space
            2. replace '.' with '-'
            3. replace '/' with '-'
        '''
        if '月' in time_str and '日' in time_str:
            time_str = time_str \
                .replace('年', '-') \
                .replace('月', '-') \
                .replace('日', ' ')

        if '.' in time_str:
            time_str = time_str \
                .replace('.', '-')

        if '/' in time_str:
            time_str = time_str.replace('/', '-')
        return time_str

    time_str = _format_date(time_str)

    # A bare clock time means "today at HH:MM".
    if _is_valid_hour_time(time_str):
        time_str = ' '.join([
            datetime.now().strftime('%Y-%m-%d'),
            time_str
        ])

    # 'MM-DD' without a year, or relative phrases containing '前' ("ago").
    if len(time_str.split('-')) == 2 or '前' in time_str:
        date_tuple = search_dates(str(time_str))
        if date_tuple:
            date_t = date_tuple[0][1]  # parsed time
            now_t = datetime.now()  # current time
            if date_t > now_t:  # parsed past "now": assume it was last year
                return int(time.mktime((date_tuple[0][1] - relativedelta(years=1)).timetuple()) * 1000)
            return int(time.mktime(date_tuple[0][1].timetuple()) * 1000)
        else:
            time_parser = dateparser.parse(str(time_str))
            return int(time.mktime(time_parser.timetuple()) * 1000)
    else:
        timestamp = None
        try:
            timestamp = _search_dates(time_str)
        except:
            # Fall back to moment's looser parser, then re-run the strict
            # parse on its normalized output.
            moment_time = moment.date(time_str)
            if moment_time:
                timestamp = _search_dates(moment_time.format('YYYY-M-D H:m:s'))
        finally:
            return timestamp


# 解析时间
# 解析时间
def parse_time(time_str):
    """Parse a Chinese relative ('刚刚', 'N天前', ...) or absolute time string into ms.

    Falls back to parse_more_time on any failure.
    """
    try:
        time_str = time_str.strip()

        if '刚刚' in time_str or '刚才' in time_str:
            return int(time.time() * 1000)
        if '天前' in time_str:
            return parse_ago_time(day=int(time_str.replace('天前', '').strip()))
        if '小时前' in time_str:
            amount = time_str.replace('小时前', '').strip()
            if '半' in amount:  # "half an hour ago"
                return parse_ago_minutes_time(minutes=30)
            return parse_ago_hours_time(hours=int(amount))
        if '分钟前' in time_str:
            return parse_ago_minutes_time(minutes=int(time_str.replace('分钟前', '').strip()))
        if '秒前' in time_str:
            return parse_ago_seconds_time(seconds=int(time_str.replace('秒前', '').strip()))

        time_str = time_str.replace('/', '-')
        parsed = time.strptime(time_str, "%Y-%m-%d %H:%M:%S")
        return int(time.mktime(parsed) * 1000)
    except:
        return parse_more_time(time_str)


# 解析另外格式的时间
def parse_other_time(time_str):
    """Parse assorted site-specific time formats into a millisecond unix timestamp.

    Handles relative phrases (刚刚/N天前/N小时前/...), then repairs several
    malformed absolute formats (missing spaces, seconds or years) before
    parsing; any failure falls back to parse_more_time.
    """
    try:
        time_str = time_str.strip()
        if '刚刚' in time_str or '刚才' in time_str:
            return int(time.time() * 1000)

        if '天前' in time_str:
            day = int(time_str.replace('天前', '').strip())
            return parse_ago_time(day=day)

        if '小时前' in time_str:
            hours = time_str.replace('小时前', '').strip()
            if '半' in hours:  # "half an hour ago"
                return parse_ago_minutes_time(minutes=30)
            hours = int(hours)
            return parse_ago_hours_time(hours=hours)

        if '分钟前' in time_str:
            minutes = int(time_str.replace('分钟前', '').strip())
            return parse_ago_minutes_time(minutes=minutes)

        if '秒前' in time_str:
            seconds = int(time_str.replace('秒前', '').strip())
            return parse_ago_seconds_time(seconds=seconds)

        if '月前' in time_str and '(' in time_str:  # time parsing tailored to the Chengdu hukou (成都落户) website
            time_parser = dateparser.parse(str(time_str.split('(')[0]))
            month = re.findall(r'.*\((.*)\)', time_str)[0]
            # substitute the '(MM-DD)' part into the parsed date string
            time_text = re.sub(r'-\d{2}-\d{2}', '-' + month, str(time_parser))
            return int(time.mktime(time.strptime(time_text.split('.')[0], "%Y-%m-%d %X")) * 1000)

        if '/' in time_str:
            if time_str.count(':') == 1 and time_str.count('/') == 1 and ' ' not in time_str:  # e.g. 202003/3108:07
                # re-insert the missing '/' and space: 2020/03/31 08:07:00
                time_list = list(time_str)
                time_list.insert(4, '/')
                time_list.insert(10, ' ')
                time_str = ''.join(time_list) + ':00'
            else:
                time_str = time_str.replace('/', '-')

        if time_str.count('-') == 2 and time_str.count(':') == 1:  # e.g. 2021-03-17 11:17
            if len(re.findall(r'.*-(.*):', time_str)[0]) == 4 or len(re.findall(r'.*-(.*):', time_str)[0]) == 3:
                time_str = time_str[:10] + ' ' + time_str[10:]  # missing space: e.g. 2022-01-2110:04
            time_str = time_str + ':00'  # append missing seconds

        if len(time_str.split('-')[0]) == 6 and time_str.count('-') == 1:  # e.g. 202011-27
            time_str = str(time_str.split('-')[0][:4]) + '-' + str(time_str.split('-')[0][4:6]) + '-' + str(
                time_str.split('-')[1]) + ' 00:00:00'

        # bare clock time (e.g. '11:17'): prefix today's date
        if time_str.count(':') == 1 and time_str.count('-') != 1 and (
                '年' not in time_str or '月' not in time_str or '日' not in time_str):
            now_date = time.strftime('%Y-%m-%d ')
            time_str = now_date + time_str + ':00'

        # 'MM-DD[ HH:MM]' without a year: prefix the current year
        if time_str.count('-') == 1:
            now_date = time.strftime('%Y-')
            if time_str.count(':') == 1:
                time_str = now_date + time_str
            else:
                time_str = now_date + time_str + ' 00:00:00'

        try:
            data_sj = time.strptime(time_str, "%Y-%m-%d %H:%M:%S")  # full datetime format
        except:
            data_sj = time.strptime(time_str, "%Y-%m-%d")  # date-only format
        finally:
            return int(time.mktime(data_sj) * 1000)
    except:
        return parse_more_time(time_str)


def get_time_stamp1(time_sj):
    """Single time string -> millisecond unix timestamp; falls back to parse_time on failure."""
    try:
        if '刚刚' in time_sj or '刚才' in time_sj:
            return int(time.time() * 1000)

        time_sj = time_sj.replace('/', '-')

        # Bare clock time: prefix today's date.
        # NOTE(review): operator precedence here is `A or (B and C)` — kept as-is.
        if re.match(r'\d+:\d+', time_sj) or (re.match(r'\d+:\d+:\d+', time_sj) and len(time_sj) < 7):
            time_sj = time.strftime("%Y-%m-%d ", time.localtime(time.time())) + time_sj

        # Bare date: append the current clock time.
        if re.match(r'\d+-\d+-\d+', time_sj) and len(time_sj) <= 10:
            time_sj = time_sj + time.strftime(" %H:%M:%S", time.localtime(time.time()))

        if time_sj.count(':') < 2:  # missing seconds
            time_sj += ':00'

        parsed = time.strptime(time_sj, "%Y-%m-%d %H:%M:%S")
        return int(time.mktime(parsed)) * 1000
    except:
        return parse_time(time_sj)


def get_time_stamp2(time_sj):
    """Simple version: single time string -> millisecond unix timestamp (no fallback)."""
    time_sj = time_sj.strip()
    if '刚刚' in time_sj or '刚才' in time_sj:
        return int(time.time() * 1000)

    time_sj = time_sj.replace('/', '-')

    # Bare clock time: prefix today's date.
    # NOTE(review): operator precedence here is `A or (B and C)` — kept as-is.
    if re.match(r'\d+:\d+', time_sj) or (re.match(r'\d+:\d+:\d+', time_sj) and len(time_sj) < 7):
        time_sj = time.strftime("%Y-%m-%d ", time.localtime(time.time())) + time_sj

    # Bare date: append the current clock time.
    if re.match(r'\d+-\d+-\d+', time_sj) and len(time_sj) <= 10:
        time_sj = time_sj + time.strftime(" %H:%M:%S", time.localtime(time.time()))

    if time_sj.count(':') < 2:  # missing seconds
        time_sj += ':00'

    parsed = time.strptime(time_sj, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed)) * 1000


def parse_ago_time(day=0):
    """Millisecond timestamp of local midnight *day* days ago (default: today).

    Shadows the earlier seconds-based definition of the same name.
    """
    import datetime
    midnight = datetime.date.today() - datetime.timedelta(days=day)
    return int(time.mktime(midnight.timetuple())) * 1000


def parse_ago_hours_time(hours=0):
    """Millisecond timestamp for *hours* hours before now (default: now)."""
    import datetime
    moment_before = datetime.datetime.now() - datetime.timedelta(hours=hours)
    return int(time.mktime(moment_before.timetuple())) * 1000


def parse_ago_minutes_time(minutes=0):
    """Millisecond timestamp for *minutes* minutes before now (default: now)."""
    import datetime
    moment_before = datetime.datetime.now() - datetime.timedelta(minutes=minutes)
    return int(time.mktime(moment_before.timetuple())) * 1000


def parse_ago_seconds_time(seconds=0):
    """Millisecond timestamp for *seconds* seconds before now (default: now)."""
    import datetime
    moment_before = datetime.datetime.now() - datetime.timedelta(seconds=seconds)
    return int(time.mktime(moment_before.timetuple())) * 1000


# 通过文本获取时间戳
# 通过文本获取时间戳
def get_timestamp(datestr):
    """Millisecond timestamp parsed from free-form text via dateparser."""
    parsed = dateparser.parse(datestr)
    return int(time.mktime(parsed.timetuple())) * 1000


def get_standard_timestamp(time_str):
    """'YYYY-MM-DD HH:MM:SS' (local time) -> millisecond unix timestamp."""
    parsed = time.strptime(time_str, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed) * 1000)


# 获取标准时间格式
# 获取标准时间格式
def get_time_string(time_stamp):
    """Unix timestamp (seconds or milliseconds) -> 'YYYY-MM-DD HH:MM:SS' in local time."""
    if len(str(time_stamp)) == 13:  # milliseconds
        time_stamp = int(time_stamp / 1000)
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))


# 获取当前时间离0点时间差
# 获取当前时间离0点时间差
def get_offset_today():
    """Milliseconds elapsed since local midnight today (via dateparser)."""
    midnight_ms = int(time.mktime(dateparser.parse('今天 00:00').timetuple())) * 1000
    return time.time() * 1000 - midnight_ms


# 生成验证信息
# 生成验证信息
def gen_authorization():
    """MD5 auth token of the spider_man_env environment prefix ('local' maps to 'dev')."""
    env = os.environ.get('spider_man_env', 'dev').split('_')[0]
    if env == 'local':
        env = 'dev'
    return md5(env)


# 判断是否为json
# 判断是否为json
def is_json(json_str):
    """Whether *json_str* parses as JSON.

    Fix: also catch TypeError so non-string input (e.g. None) returns
    False instead of raising out of the check.
    """
    try:
        json.loads(json_str)
    except (ValueError, TypeError):
        return False
    return True


# 生成uuid
# 生成uuid
def get_url_uuid(origin_url):
    """Stable MD5 uuid for a URL: https normalized to http, surrounding '/' trimmed."""
    if origin_url.startswith('https://'):
        origin_url = origin_url.replace('https://', 'http://', 1)
    return md5(trim(origin_url, '/'))


def trim_left(str, replace_str):
    """Remove one leading occurrence of *replace_str* (param name shadows builtin; kept for callers)."""
    if not str.startswith(replace_str):
        return str
    return str[len(replace_str):]


def trim_right(str, replace_str):
    """Remove one trailing occurrence of *replace_str*.

    Fix: an empty replace_str previously returned '' because
    ``str[0:-0]`` is an empty slice; it now leaves the string unchanged.
    """
    if replace_str and str.endswith(replace_str):
        return str[0: -len(replace_str)]
    return str


def trim(str, replace_str):
    """Remove one occurrence of *replace_str* from each end of *str*."""
    return trim_right(trim_left(str, replace_str), replace_str)


# 过滤HTML
# 过滤HTML
def filter_html(html):
    """Strip HTML tags from *html*, leaving only the text content."""
    return re.sub(r'<[^>]+>', '', html)


# base64 decode
# base64 decode
def base64decode(origin_str):
    """Base64-decode *origin_str*, restoring any stripped '=' padding first.

    Fix: base64 works in 4-character groups, so missing padding must be
    computed modulo 4 — the old modulo-3 logic produced invalid padding
    (e.g. 'YQ' -> 'YQ=' instead of 'YQ==').
    """
    remainder = len(origin_str) % 4
    if remainder == 2:
        origin_str += '=='
    elif remainder == 3:
        origin_str += '='
    origin_str = bytes(origin_str, encoding='utf8')
    return base64.b64decode(origin_str).decode()


# 如果只有日期没有时间，hour, minute, second转换成当前采集时间
def optimazat_pubtime(timestamp):
    """
    如果只有日期没有时间 — when a millisecond timestamp has a zero time-of-day
    (per TimeUtils.is_zero_time), substitute the current clock time for
    the hour/minute/second part.
    """
    if timestamp and TimeUtils.is_zero_time(timestamp):
        now_clock = time.strftime('%Y-%m-%d %H:%M:%S')[-8:]
        day_part = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp / 1000))[:-8]
        rebuilt = day_part + now_clock
        timestamp = int(time.mktime(time.strptime(rebuilt, '%Y-%m-%d %H:%M:%S'))) * 1000
    return timestamp


def get_website_domain(url):
    """
    拿到网站域名:www.baidu.com,www.cpnn.com.cn等

    Extract the host name (port, credentials and path stripped) from an
    http(s) URL. Returns None for non-string or non-http input.

    Fix: the nested regex fallbacks mishandled URLs without a path but
    with a query (e.g. 'http://host?x=1' -> 'host?x=1') and used
    un-escaped '\\d' patterns; urlparse().hostname handles all cases.
    This duplicates (and at import time shadows) the earlier definition.
    """
    if not isinstance(url, str):
        return
    url = url.lower()
    if not url.startswith('http'):
        return
    domain = urlparse(url).hostname
    if not domain:
        print('垃圾链接     {}'.format(url))
        domain = None
    return domain


def get_website_complete_domain(url):
    """
    scheme://netloc for an http(s) URL (lower-cased), e.g. 'https://www.baidu.com'.
    Returns None for non-string or non-http input.
    """
    if not isinstance(url, str):
        return
    url = url.lower()

    if not url.startswith('http'):
        return

    parsed = urlparse(url)
    return f'{parsed.scheme}://{parsed.netloc}'


def get_website_domain_new(url):
    """
    Netloc of an http(s) URL with any 'www.' removed, e.g. 'baidu.com'.
    Returns None for non-string or non-http input.
    NOTE(review): the 'www' containment check triggers replace() anywhere
    in the netloc, not only as a prefix — behavior preserved as-is.
    """
    if not isinstance(url, str):
        return
    url = url.lower()

    if not url.startswith('http'):
        return

    netloc = urlparse(url).netloc
    return netloc.replace('www.', '') if 'www' in netloc else netloc


def time_zone_to_time_stamp(time_zone, hours):
    """
    时区转时间戳:
    True when the zoned time string (e.g. 2022-01-26T16:28:10.292+0800) is
    at least *hours* hours older than now, else False.
    NOTE(review): despite the name and original docstring, the result is a
    bool, not a pair of timestamps.
    """
    import datetime
    plain = re.sub('- |Z', '', time_zone.split('.')[0].replace('T', ' '))
    published = int(time.mktime(time.strptime(plain, "%Y-%m-%d %H:%M:%S")))
    now_stamp = int(datetime.datetime.now().timestamp())
    return (now_stamp - published) / 3600 >= hours


def is_timeout(mid_time, hours):
    """
    True when *mid_time* (unix timestamp, seconds or milliseconds) is at
    least *hours* hours before now — used to stop paging over stale posts.

    :param mid_time: unix timestamp (s or 13-digit ms)
    :param hours: staleness threshold in hours
    """
    import datetime
    if len(str(mid_time)) == 13:  # milliseconds
        mid_time = int(mid_time / 1000)
    elapsed_hours = (int(datetime.datetime.now().timestamp()) - mid_time) / 3600
    return elapsed_hours >= hours


def get_news_path(response, xpath):
    """
    Site breadcrumb of a detail page, e.g. '首页>>双碳与ESG'.
    Each extracted segment has newlines, spaces and '>' stripped; segments
    that become empty (before '\r' removal) are dropped; the rest are
    joined with '>>'.
    """
    segments = []
    for raw in response.xpath(xpath).extract():
        cleaned = raw.replace('\n', '').replace(' ', '').replace('>', '')
        if not cleaned:
            continue
        segments.append(cleaned.replace('\r', ''))
    return '>>'.join(segments)


def get_public_item(task, table_name):
    """Base item dict shared by pipelines: the originating task plus target table name."""
    return {
        'origin': task,
        'table_name': table_name,
    }


def get_aboard_ip():
    """requests proxies dict for the fixed overseas (HK) tunnel proxy account."""
    username = "userID-958-orderid-1960-region-hk"
    password = "2leMqi"
    tunnel = 'data.jifengdaili.com:10000'  # tunnel host:port
    proxy_url = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": username, "pwd": password, "proxy": tunnel}
    return {"http": proxy_url, "https": proxy_url}


def get_tunnel_ip():
    """requests proxies dict, round-robining over tunnel accounts via a redis counter."""
    accounts = [
        ("t16910164470914", "adv0o620", 'z301.kdltps.com:15818'),
        ("t17867543792615", "lccikmk3", 'x820.kdltps.com:15818'),
        ("t18266514985828", "vj5eyktx", 'p703.kdltps.com:15818'),
        ("t18266476510985", "sdsi9itc", 'o135.kdltps.com:15818'),
    ]
    # shared counter spreads load across accounts from all workers
    counter = redis_conn.incr('tunnel_proxy_count')
    username, password, tunnel = accounts[counter % len(accounts)]
    proxy_url = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": username, "pwd": password, "proxy": tunnel}
    return {"http": proxy_url, "https": proxy_url}
