import json
import logging
import os
import re
import time
from os.path import exists
from urllib.parse import urljoin

import requests
from pyquery import PyQuery as py

from base import BASE_PATH, PROXIES

# Logging configuration: timestamp - level: message
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')

# Base URL of the chart list pages to scrape
BASE_URL = 'https://www.gequhai.com/top/jingdian'
# Total number of list pages to crawl
TOTAL_PAGE = 10

# Lower-case alias kept for backward compatibility (run() reads it);
# aliasing instead of repeating the literal keeps the two in sync.
total_page = TOTAL_PAGE

# Directory where downloaded results are stored
# (previously this constant was assigned twice — deduplicated)
RESULIS_DIR = BASE_PATH + os.sep + 'results' + os.sep + 'spilder_audio'


def request_scrape(url, method='GET', data=None, is_json=False, headers_extend=None, timeout=30):
    """
    Request a URL and return the response body (text or parsed JSON).

    :param url: target URL
    :param method: HTTP method, e.g. 'GET' or 'POST'
    :param data: form data sent with POST requests
    :param is_json: when True, parse the response body as JSON
    :param headers_extend: extra headers merged over the defaults
    :param timeout: per-request timeout in seconds; prevents hanging forever
                    on a dead connection (new, backward-compatible parameter)
    :return: response text / parsed JSON on HTTP 200, otherwise None
    """
    logging.info('开始请求url, url: %s', url)
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
        }
        if headers_extend:
            # Merge caller-supplied header overrides into the defaults
            headers.update(headers_extend)

        # Route through a proxy to reduce the chance of IP-based blocking
        proxies = PROXIES
        # Sleep 5 seconds between requests to avoid IP rate limiting
        time.sleep(5)
        response = requests.request(method, url, data=data, headers=headers,
                                    proxies=proxies, timeout=timeout)
        if response.status_code == 200:
            return response.json() if is_json else response.text
        logging.error('请求状态码错误, code: %s', response.status_code)
    except requests.RequestException:
        # exc_info=True logs the full traceback for debugging
        logging.error('请求报错，url: %s', url, exc_info=True)
    # Explicit: any failure path yields None; callers must guard for it
    return None


def scrape_index(page):
    """
    Build the URL of a chart list page for the given page number.

    :param page: 1-based page index
    :return: full list-page URL
    """
    return f'{BASE_URL}/?page={page}'


def parse_index(html):
    """
    Parse list-page HTML and extract the detail-page links.

    :param html: list-page HTML source; may be None when the request failed
    :return: list of absolute detail-page URLs (empty on failure)
    """
    results = []
    # Guard: request_scrape returns None on failure; py(None) would misbehave
    if not html:
        return results
    doc = py(html)
    # Each detail page is linked via an <a class="text-info"> element
    for a in doc('a.text-info').items():
        href = a.attr('href')
        # Skip anchors without an href (attr() returns None for them)
        if href:
            # urljoin resolves both relative paths and leading-slash paths
            # correctly, unlike naive string concatenation which produced
            # 'https://www.gequhai.com//...' for absolute hrefs
            results.append(urljoin('https://www.gequhai.com/', href))
    return results


def parse_detail(detail_urls):
    """
    Fetch and parse each detail page, collecting audio name, audio link
    and lyrics.

    :param detail_urls: iterable of detail-page URLs
    :return: list of [song_name, music_link, lyrics] triples; music_link is
             '' or None when no playable link could be resolved, lyrics is
             None when the page has no lyrics element
    """
    music_info = []
    for url in detail_urls:
        detail_html = request_scrape(url)
        # Guard: request_scrape returns None on failure — skip instead of
        # crashing on py(None) / re.search(..., None)
        if not detail_html:
            continue
        doc = py(detail_html)
        # Song title lives in the #song-title element
        song_name = doc('#song-title').text()
        # Lyrics live in #content-lrc2; None when the element is missing
        lrc_node = doc('#content-lrc2')
        song_gc = lrc_node.text() if lrc_node else None
        # The encrypted play id is embedded in an inline script:
        # window.play_id = '...'; raw string avoids invalid-escape warnings
        result = re.search(r"window\.play_id\s*=\s*'([^']+)'", detail_html, re.S)
        play_id = result.group(1) if result else None
        music_link = ""
        # Resolve the audio URL through the site's API using the play id
        if play_id:
            headers_extend = {
                'X-Requested-With': 'XMLHttpRequest',
                'X-Custom-Header': 'SecretKey'
            }
            res = request_scrape('https://www.gequhai.com/api/music', 'POST',
                                 {'id': play_id}, is_json=True,
                                 headers_extend=headers_extend)
            # Guard: res is None when the API request failed
            if res and res.get('data'):
                music_link = res['data'].get('url')
        music_info.append([song_name, music_link, song_gc])
    return music_info


def sanitize_filename(filename):
    """
    Replace characters that are illegal in file names with underscores.

    Windows forbids these characters in file names: \\ / : * ? " < > |

    :param filename: raw file name
    :return: sanitized file name safe to write to disk
    """
    illegal = re.compile(r'[\\/:*?"<>|]')
    return illegal.sub('_', filename)

def save_files(taget_path, name, data):
    """
    Save a file: lyrics (text written directly) or audio (downloaded
    from a link).

    :param taget_path: sub-directory under RESULIS_DIR
    :param name: file name including extension; '.lrc' means lyrics
    :param data: lyrics text for '.lrc' files, otherwise the audio URL
    :return: None
    """
    target_path = f'{RESULIS_DIR}/{taget_path}'
    # exist_ok=True creates the directory if missing and avoids the
    # check-then-create race of exists() + makedirs()
    os.makedirs(target_path, exist_ok=True)
    # Strip characters that are illegal in file names before saving
    clean_name = sanitize_filename(name)
    file_path = f'{target_path}/{clean_name}'
    logging.info('存储文件的路径: %s', file_path)
    # '.lrc' (with the dot) so a song merely ending in "lrc" is not
    # mistaken for a lyrics file
    if clean_name.lower().endswith('.lrc'):
        # Lyrics: plain UTF-8 text
        with open(file_path, mode='wt', encoding='utf-8') as f:
            f.write(data)
    else:
        # Audio: here `data` is the download URL. Guard against an
        # empty/None link (requests.get(None) would raise).
        if not data:
            logging.warning('audio link is empty, skip: %s', clean_name)
            return
        # Stream the download to support large files; sleep to avoid
        # hammering the server and triggering rate limiting.
        time.sleep(5)
        with requests.get(data, stream=True, timeout=30) as r:
            r.raise_for_status()
            with open(file_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:  # filter out keep-alive empty chunks
                        f.write(chunk)


def run():
    """
    Crawl every list page, then download the audio file and lyrics for
    each song found.
    """
    for page in range(1, total_page + 1):
        # Build and fetch the list page
        index_url = scrape_index(page=page)
        html = request_scrape(index_url)
        # Guard: request may fail and return None — skip the page
        if not html:
            logging.warning('list page request failed, page: %s', page)
            continue
        # Extract detail-page URLs from the list page
        detail_urls = parse_index(html)
        logging.info('详情页链接如下: %s', detail_urls)
        # Parse each detail page: [song_name, music_link, lyrics]
        music_info = parse_detail(detail_urls)
        for item in music_info:
            logging.info("每一个音频信息: %s", item)
            song_name, music_link, song_gc = item
            # Lyrics file shares the song's base name
            song_gc_name = song_name.split('.')[0] + '.lrc'
            logging.info("歌词文件名: %s", song_gc_name)
            # Save lyrics only when the page actually had them
            if song_gc:
                save_files('audio_gc', song_gc_name, song_gc)
            # Save audio only when a playable link was resolved
            if music_link:
                save_files('audio_music', song_name, music_link)


if __name__ == '__main__':
    # Script entry point (dead commented-out test code removed)
    run()
