import json
import re, os, sys
import random
import redis
import requests, pyhttpx
from curl_cffi import requests as cffi_requests
from pymongo import MongoClient
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, BASE_DIR + '/venv/lib/site-packages')
from Icrawler9 import settings
from Icrawler9.tools import configs
from Icrawler9.tools.configs import REP_VUL

def bypass_ja3_post(url, headers, proxies, data, files=None, timeout=15):
    """POST with three TLS/JA3-fingerprint fallbacks: requests -> pyhttpx -> curl_cffi.

    An attempt is considered failed when it raises or when the status code is
    not 200/202; the next client is then tried. Returns the last response
    obtained, or None when every client raised.

    url/headers/proxies/data/files/timeout are forwarded to parse_post_req,
    which picks the POST style from the content-type header.
    """
    files = {} if files is None else files  # avoid shared mutable default
    # Resolve the content type once, up front. In the original code it was
    # assigned inside the first try-block, so any failure before that line
    # left it undefined and both fallback branches died with a NameError.
    content_type = (headers or {}).get('content-type') or 'application/x-www-form-urlencoded'
    sess = 'sess'
    resp = None
    try:
        sess = 'sess1'
        sess1 = requests.session()
        if headers: sess1.headers = headers
        if proxies: sess1.proxies = proxies
        resp = parse_post_req(content_type, sess1, url, data, files, timeout)
        if resp.status_code not in [200, 202]: raise ValueError
    except Exception as e0:
        try:
            sess = 'sess2'
            sess2 = pyhttpx.HttpSession(browser_type='chrome', http2=True)
            if headers: sess2.headers = headers
            if proxies: sess2.proxies = proxies
            resp = parse_post_req(content_type, sess2, url, data, files, timeout)
            if resp.status_code not in [200, 202]: raise ValueError
        except Exception as e1:
            try:
                sess = 'sess3'
                sess3 = cffi_requests.Session()
                # Randomize the impersonated browser TLS fingerprint.
                sess3.impersonate = random.choice(configs.impersonates)
                if headers: sess3.headers = headers
                if proxies: sess3.proxies = proxies
                resp = parse_post_req(content_type, sess3, url, data, files, timeout)
            except Exception as e:
                print(e)
    return resp

def parse_post_req(content_type, sess, url, data, files=None, timeout=15):
    """Dispatch a POST in the style required by *content_type*.

    content_type: value of the request's content-type header (substring match).
    sess: any session object exposing a requests-style .post().
    data: payload — dict for json/form posts, str for text/xml (encoded to
        UTF-8 bytes before sending).
    files: multipart files mapping, used only for multipart/form-data.
    Returns whatever sess.post() returns. Redirects are never followed.
    """
    files = {} if files is None else files  # avoid shared mutable default
    ct = content_type.lower()
    if 'application/json' in ct:
        resp = sess.post(url, json=data, timeout=timeout, allow_redirects=False)
    elif 'text/xml' in ct:
        # XML payloads arrive as str; send explicit UTF-8 bytes.
        resp = sess.post(url, data=data.encode("utf-8"), timeout=timeout, allow_redirects=False)
    elif 'multipart/form-data' in ct:
        resp = sess.post(url, data=data, files=files, timeout=timeout, allow_redirects=False)
    else:
        # Default: application/x-www-form-urlencoded style form post.
        resp = sess.post(url, data=data, timeout=timeout, allow_redirects=False)
    return resp

def bypass_ja3_get(url, headers, proxies, param=None, timeout=15):
    """GET with three TLS/JA3-fingerprint fallbacks: requests -> pyhttpx -> curl_cffi.

    An attempt is considered failed when it raises or when the status code is
    not 200/202; the next client is then tried. Returns the last response
    obtained, or None when every client raised.
    """
    param = {} if param is None else param  # avoid shared mutable default
    sess = 'sess'
    resp = None
    try:
        sess = 'sess1'
        sess1 = requests.session()
        sess1.headers = headers
        sess1.proxies = proxies
        resp = sess1.get(url, params=param, timeout=timeout, allow_redirects=False)
        if resp.status_code not in [200, 202]: raise ValueError
    except Exception as e0:
        try:
            sess = 'sess2'
            sess2 = pyhttpx.HttpSession(browser_type='chrome', http2=True)
            if headers: sess2.headers = headers
            if proxies: sess2.proxies = proxies
            # NOTE(review): pyhttpx does not get `param` forwarded here; pass a
            # fully-built URL if query params must survive this fallback.
            resp = sess2.get(url, timeout=timeout, allow_redirects=False)
            if resp.status_code not in [200, 202]: raise ValueError
        except Exception as e1:
            try:
                sess = 'sess3'
                sess3 = cffi_requests.Session()
                # Randomize the impersonated browser TLS fingerprint.
                sess3.impersonate = random.choice(configs.impersonates)
                if headers: sess3.headers = headers
                if proxies: sess3.proxies = proxies
                # Forward query params (previously dropped in this fallback);
                # curl_cffi mirrors the requests API and accepts params=.
                resp = sess3.get(url, params=param, timeout=timeout)
            except Exception as e:
                print(e)
    return resp

def get_td_text(td, text_reg, space=' '):
    """Extract and normalize the visible text of a table cell selector.

    td: a selector-like object whose .xpath(...).extract() returns a list of
        strings (scrapy Selector in practice).
    text_reg: XPath for the text nodes; on an empty result the query is
        retried with its last path step cut off (one level up).
    space: separator used to join the per-node fragments.
    Returns a single whitespace-collapsed string; '' when nothing matched.
    """
    try:
        text = td.xpath(text_reg).extract()
    except Exception:
        text = []
    if not text:
        # Retry one level up: strip the last '/...' step from the XPath.
        try:
            text = td.xpath(text_reg[:text_reg.rfind('/')]).extract()
        except Exception:
            text = []
    # Strip list-bullet/decoration glyphs commonly embedded in cell text.
    td_text_ = [re.sub(r'○|•|Ø|‧|||•|⚫|●|\*|■', '', t) for t in text]
    td_text_ = space.join([t.strip() for t in td_text_ if t.strip()])
    # Collapse whitespace and drop inline tags, keeping small/sub/sup markup.
    td_text = re.sub(r'\s+|<(?!(small|/small|sub|/sub|sup|/sup))[^>]*>', ' ', td_text_) if td_text_ else ''
    return td_text.strip()

def parse_list_json_value(host, tr, rowspan_lists, tr_ind, ut=0, td_reg=None, td_text_reg=None, space=' '):
    """Parse one table row (<tr>) into a list of cell values for a list_json record.

    host: URL prefix prepended to relative hrefs/srcs.
    tr: selector for the row; cells come from './*' or td_reg.
    rowspan_lists: MUTATED in place — carries (tr_ind, td_ind, num, val) tuples
        for cells whose rowspan must be re-inserted into later rows; the caller
        must pass the same list for every row of a table.
    tr_ind: index of this row within the table.
    ut: link handling — 0: cell text, 1: href url, 2: 'text|||url'.
    td_reg / td_text_reg: optional XPath overrides for cells and cell text.
    space: separator forwarded to get_td_text.
    Returns the list of cell values, colspan-duplicated and rowspan-filled.
    """
    values = []
    tds = tr.xpath('./*') if not td_reg else tr.xpath(td_reg)
    for td in tds:
        # Link target: anchor href or button onclick (url extracted from quotes).
        href_x = td.xpath('.//a/@href | .//button/@onclick').extract_first()
        href_r = re.search(r'\"(.*)\"', href_x) if href_x else ''
        href = href_r.group(1) if href_r else href_x.strip() if href_x else ''
        src = td.xpath('.//img/@src').extract_first()
        td_rowspan = td.xpath('./@rowspan').extract_first()
        td_colspan = td.xpath('./@colspan').extract_first()
        if href and ('javascript' not in href):
            # Absolute url as-is; otherwise join host + href with '..' trimmed.
            url = href if re.search(r'^http', href) else host + href.strip('..')
            if not td_text_reg: td_text_reg = './/text()' 
            text = get_td_text(td, td_text_reg, space)
            if ut == 2:
                val = (text + '|||' + url)
            elif ut == 1:
                val = url
            else:
                val = text
        elif src:
            # Image cell: keep the (absolutized) image src as the value.
            val = src if re.search(r'^http', src) else host + src.strip('..')
        else:
            if not td_text_reg: td_text_reg = './/text()' 
            val = get_td_text(td, td_text_reg, space)
            # Placeholder values (REP_VUL) are blanked out.
            if val in REP_VUL:
                val = ''
        values.append(val)
        if td_colspan:
            # Duplicate the value so the row keeps one entry per column.
            num = int(td_colspan)
            for i in range(num-1):
                values.append(val)
        if td_rowspan:
            # Remember rowspan cells so following rows get this value inserted.
            # NOTE(review): values.index(val) returns the FIRST occurrence, so a
            # duplicated value could record the wrong column index — verify on
            # tables mixing colspan and repeated cell text.
            num = int(td_rowspan)
            td_ind = values.index(val)
            rowspan_lists.append((tr_ind, td_ind, num, val))
    # Re-insert values from rowspan cells opened in earlier rows that still
    # cover this row (raw_tr_ind < tr_ind < raw_tr_ind + num).
    for rowspan_tuple in rowspan_lists:
        ind = rowspan_lists.index(rowspan_tuple)
        raw_tr_ind = rowspan_tuple[0]
        raw_td_ind = rowspan_tuple[1]
        num = rowspan_tuple[2]
        val = rowspan_tuple[3]
        if raw_tr_ind < tr_ind < num + raw_tr_ind:
            values.insert(raw_td_ind, val)
    return values

def parse_list_json_key(td_reg, text_reg, tr1, tr2=None, key2=None):
    """Build the flat list of column keys from one or two table header rows.

    td_reg / text_reg: XPath for the header cells and their text.
    tr1: primary (upper) header row selector.
    tr2: optional second header row whose cells fill colspan groups of tr1.
    key2: optional pre-extracted second-row keys (used when this function is
        called repeatedly to fold multi-row headers top-down).
    Returns a flat list of key strings, one per leaf column.
    """
    key1 = []
    key2 = [] if not key2 else key2
    tds1 = tr1.xpath(td_reg)
    tds2 = tr2.xpath(td_reg) if tr2 else []
    for td1 in tds1:
        # An image header cell uses its src as the key text.
        src = td1.xpath('.//img/@src').extract_first()
        if src:
            td_text = src
        else:
            td_text = get_td_text(td1, text_reg)
        col_span = td1.xpath('./@colspan').extract_first()
        row_span = td1.xpath('./@rowspan').extract_first()
        if col_span and int(col_span) > 1:
            # Colspan group: placeholder list, one slot per spanned column,
            # to be combined with second-row keys below.
            key1.append([td_text] * int(col_span))
        elif not row_span or int(row_span) < 2:
            # Plain single-row cell: single-slot group.
            key1.append([td_text])
        else:
            # Rowspan >= 2: bare string marks a cell that already covers
            # both header rows and must not consume a second-row key.
            key1.append(td_text)
    for td2 in tds2:
        td_text = get_td_text(td2, text_reg)
        key2.append(td_text)
    # Combine each colspan group with the matching slice of second-row keys.
    # NOTE(review): lists_add is defined elsewhere in the project — presumably
    # it concatenates the paired first/second-row labels; confirm at its def.
    for i, k1 in enumerate(key1):
        ind = i
        if isinstance(k1, list):
            k2 = key2[:len(k1)]
            ks = lists_add((k1, k2))
            key1[ind] = ks
            key2 = key2[len(k2):]
    # Flatten groups into the final key list.
    keys = []
    for key in key1:
        if isinstance(key, list):
            for k in key:
                keys.append(k)
        else:
            keys.append(key)
    return keys

def parse_list_json_keys(tbody, keys_rowspan, tds_reg, key_text_reg, k):
    """Parse the header keys of a table whose header spans `keys_rowspan` rows.

    tbody: table body selector; header rows are addressed as './*[i]' with a
        1-based offset shifted by `k`.
    keys_rowspan: number of header rows (1, 2, or more).
    tds_reg / key_text_reg: XPath for header cells and their text.
    Returns the flat list of column key strings.
    """
    if keys_rowspan in [1]:
        # Single header row: read each cell, duplicating text across colspans.
        header_tds = tbody.xpath('./*[{}]'.format(keys_rowspan + k)).xpath(tds_reg)
        keys = []
        for cell in header_tds:
            cell_text = get_td_text(cell, key_text_reg)
            keys.append(cell_text)
            colspan = cell.xpath('./@colspan').extract_first()
            if colspan:
                keys.extend([cell_text] * (int(colspan) - 1))
        return keys
    # Two or more header rows: fold the bottom two rows first, then merge each
    # remaining row upward one at a time. (For keys_rowspan == 2 the loop body
    # never runs, so this path matches the old dedicated two-row branch.)
    lower_tr = tbody.xpath('./*[{}]'.format(keys_rowspan + k))
    upper_tr = tbody.xpath('./*[{}]'.format(keys_rowspan - 1 + k))
    keys = parse_list_json_key(tds_reg, key_text_reg, upper_tr, lower_tr)
    for row_no in range(keys_rowspan - 2, 0, -1):
        next_up = tbody.xpath('./*[{}]'.format(row_no + k))
        keys = parse_list_json_key(tds_reg, key_text_reg, next_up, None, keys)
    return keys

def check_json_format(resp):
    """Return True if *resp* is a string containing valid JSON, else False.

    Non-string inputs (dicts, bytes, None, ...) are rejected outright; only
    a parseable JSON string counts.
    """
    if not isinstance(resp, str):
        return False
    try:
        json.loads(resp)
    except ValueError:  # json.JSONDecodeError subclasses ValueError
        return False
    return True

def get_proxys(proxy=None):
    """Build a requests-style proxies dict from a proxy config mapping.

    proxy: mapping with keys PROXY_SERVERS (list of 'scheme://host:port'
        strings, one picked at random), PROXY_USER and PROXY_PASS. Defaults
        to configs.proxy, now resolved at call time instead of import time so
        config reloads take effect (the old `proxy=configs.proxy` default was
        frozen when the module was imported).
    Returns {'http': url, 'https': url} with embedded credentials.
    """
    if proxy is None:
        proxy = configs.proxy
    proxy_server = random.choice(proxy.get('PROXY_SERVERS'))
    # rsplit on the last ':' so the scheme's '//' is untouched; drop scheme.
    proxy_host = proxy_server.rsplit(':', maxsplit=1)[0].split('//')[-1]
    proxy_port = proxy_server.rsplit(':', maxsplit=1)[-1]
    proxy_meta = 'http://{user}:{pwd}@{host}:{port}'.format(
        user=proxy.get('PROXY_USER'),
        pwd=proxy.get('PROXY_PASS'),
        host=proxy_host,
        port=proxy_port,
    )
    return {
        'http': proxy_meta,
        'https': proxy_meta,
    }

def xp_tag(html, tag, spac, flag=0):
    """Extract text with XPath and normalize it.

    tag pattern style: //*[contains(@class, "")]
    spac: separator/replacement used for whitespace runs and joining.
    flag: when truthy, additionally strip bullet glyphs such as ○ | • | ‧.
    Returns the joined, stripped text ('' when nothing matched).
    """
    strip_tags = r'<(?!(small|/small|sub|/sub|sup|/sup))[^>]*>'
    pieces = []
    for fragment in html.xpath(tag).extract():
        if not fragment.strip():
            continue
        # Drop inline tags (keeping small/sub/sup), then collapse whitespace.
        cleaned = re.sub(r'\s+', spac, re.sub(strip_tags, '', fragment)).strip()
        pieces.append(cleaned)
    text = spac.join(pieces).strip()
    if flag:
        text = re.sub(r'○|•|‧', '', text).strip()
    return text

def cli_redis(db=10, host=None):
    """Create a Redis client backed by a fresh connection pool.

    db: Redis database index (defaults to 10).
    host: pass 'spider' to target the dedicated spider Redis node; any other
        value (including None) uses the Redis configured in settings.
    Returns a redis.Redis client.
    """
    if host == 'spider':
        # NOTE(review): hard-coded host/port/password for the 'spider' node —
        # consider moving these credentials into settings/config.
        pool = redis.ConnectionPool(host='192.168.1.13', port=6380, password='aws2023', db=db, health_check_interval=30)
    else:
        pool = redis.ConnectionPool(host=settings.REDIS_HOST, port=settings.REDIS_PORT, password=settings.REDIS_PARAMS.get('password'), db=db, health_check_interval=30)
    redis_cli = redis.Redis(connection_pool=pool)
    return redis_cli


def cli_mongo():
    """Connect to MongoDB, authenticate against the admin database, and
    return the (client, spider_database) pair.

    NOTE(review): Database.authenticate() was removed in PyMongo 4.x —
    confirm the pinned pymongo version still provides it, or switch to
    passing username/password to MongoClient directly.
    """
    client = MongoClient(host=settings.MONGO_HOST, port=settings.MONGO_PORT)
    db = client.admin
    db.authenticate(settings.MONGO_USER, settings.MONGO_PSW)
    return client, client.spider