#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time :10/26/2023 8:19 AM
# @Author : shiyou pan

import datetime
import json
import re
import subprocess
from pathlib import Path
from typing import Union

import chardet
import requests
from bs4 import BeautifulSoup


def parse_json(instr: str) -> dict:
    """
    针对爬虫获取的json格式进行解析
    """
    try:
        json_text = json.loads(instr)
    except ValueError:
        start = instr.find("(") + 1 if instr.find("(") >= 0 else None
        end = instr[::-1].find(")") + 1 if instr[::-1].find("(") >= 0 else None
        json_text = json.loads(instr[start:-end])
    except Exception:
        json_text = None

    return json_text


def clean_text(instr: str) -> str | None:
    if instr is None:
        return None
    else:
        outstr = instr.replace('<em>', '').replace('</em>', '')
        return outstr


def find_name_position(name: str, name_list: list) -> int:
    """Locate a (company) name inside a list of name rows.

    Both the needle and every candidate are normalised by removing highlight
    tags (via ``clean_text``) and all half/full-width parentheses before the
    exact-match comparison.

    Args:
        name: the name to look for.
        name_list: a list of rows, each row a list of candidate names
            (entries may be None).

    Returns:
        Index of the first row containing the normalised name, or -1 when
        no row matches (including when name_list is empty — the original
        ``max()`` call raised ValueError on an empty list).
    """
    parens = '[)(）（]'
    cleaned_rows = [
        [re.sub(parens, '', clean_text(candidate)) if candidate is not None else None
         for candidate in row]
        for row in name_list
    ]
    target = re.sub(parens, '', name)

    for idx, row in enumerate(cleaned_rows):
        if target in row:
            return idx
    return -1


def clean_web_content(instr: str) -> str:
    """Normalise whitespace in scraped page text.

    Strips the outer whitespace, drops ideographic spaces, non-breaking
    spaces and tabs, and collapses runs of CR/LF into a single newline.

    Args:
        instr: raw text extracted from a web page.

    Returns:
        The cleaned text.
    """
    text = instr.strip()
    text = re.sub('[\u3000\xa0\t]+', '', text)
    return re.sub('[\n\r]+', '\n', text)


def get_html(url):
    """Fetch *url* and return its parsed DOM.

    The response encoding is detected with chardet before decoding, then
    the text is parsed with BeautifulSoup's stdlib ``html.parser`` backend
    (an explicit parser avoids GuessedAtParserWarning and keeps parsing
    consistent across environments).

    Args:
        url: page URL to fetch.

    Returns:
        A BeautifulSoup document on HTTP 200, otherwise None.
    """
    headers = {'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/'
                             '537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
    # requests has no default timeout; without one a dead server hangs forever.
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code != 200:
        return None
    detected = chardet.detect(response.content)
    response.encoding = detected.get('encoding')
    return BeautifulSoup(response.text, 'html.parser')


def java_python_time(java_time: int = None, python_time: str = None) -> Union[str, int]:
    """Convert between Java millisecond timestamps and local-time strings.

    Exactly one of the two arguments should be supplied.

    Args:
        java_time: milliseconds since the epoch (Java convention).
        python_time: a local-time string, either '%Y-%m-%d %H:%M:%S'
            (19 chars) or '%Y-%m-%d' (10 chars).

    Returns:
        A '%Y-%m-%d %H:%M:%S' string when java_time is given; an int
        millisecond timestamp when python_time is given; None when neither
        argument is supplied (preserved from the original behavior).

    Raises:
        ValueError: if python_time matches neither supported format
            (previously this path crashed with AttributeError on None).
    """
    if java_time is not None:
        # Java timestamps are milliseconds; Python expects seconds.
        seconds = java_time / 1000
        return datetime.datetime.fromtimestamp(seconds).strftime('%Y-%m-%d %H:%M:%S')
    if python_time is not None:
        if len(python_time) == 19:
            parsed = datetime.datetime.strptime(python_time, '%Y-%m-%d %H:%M:%S')
        elif len(python_time) == 10:
            parsed = datetime.datetime.strptime(python_time, '%Y-%m-%d')
        else:
            raise ValueError(f'unsupported python_time format: {python_time!r}')
        return int(parsed.timestamp() * 1000)
    return None


def get_proxy_list(max_page: int = 20):
    """Scrape free proxy entries from kuaidaili.com.

    Args:
        max_page: number of listing pages to crawl (pages 1..max_page).

    Returns:
        A list of ``[ip, port, anonymity]`` triples, one per table row.
    """
    proxies = []
    for i in range(max_page):
        soup = get_html(f'https://www.kuaidaili.com/free/intr/{i + 1}')
        table_body = soup.find('div', attrs={'class': 'table-section'}).find('tbody')
        for row in table_body.findAll('tr'):
            proxies.append([row.select('td:nth-child(1)')[0].text,
                            row.select('td:nth-child(2)')[0].text,
                            row.select('td:nth-child(6)')[0].text.lstrip()])
    return proxies


def download_web_file(url: str, filepath: str) -> str:
    """Download *url* to *filepath* with curl, skipping existing files.

    Args:
        url: remote file URL.
        filepath: local destination path.

    Returns:
        The destination path (whether freshly downloaded or pre-existing).

    Raises:
        subprocess.CalledProcessError: if curl exits non-zero.
    """
    target = Path(filepath)
    if not target.is_file():
        subprocess.run(['curl', '-o', filepath, url], check=True)
    else:
        print('NOTE: file existed, will not download again....')
    return filepath


if __name__ == '__main__':
    # Utility library only — nothing to run when executed as a script.
    pass
