import requests
from bs4 import BeautifulSoup
import logging
import pandas as pd
import hashlib
import time
import re

class MusicScraper:
    """Scrape song metadata from a music site's search pages and download MP3s.

    The site's API authenticates each request with an MD5 ``sign``: the query
    parameters are serialized in alphabetical order, the shared ``secret`` is
    appended, and the MD5 hex digest of that string is sent alongside the
    parameters.  The signature must therefore be computed over the *same*
    timestamp that is sent in the request.
    """

    def __init__(self, search_word, base_url, secret):
        # search_word: artist / keyword to search for.
        # base_url: scheme + host of the site, without a trailing slash.
        # secret: shared signing key appended to the query string before MD5.
        self.search_word = search_word
        self.base_url = base_url
        self.secret = secret
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (like Gecko) Chrome/134.0.0.0 Safari/537.36"
        }

    def get_total_pages(self):
        """Return the number of result pages for the search word.

        Returns:
            int: page count (1 when no pager is rendered), or None on a
            network / HTTP error.
        """
        url = f'{self.base_url}/search'
        params = {"word": self.search_word}
        try:
            response = requests.get(url, headers=self.headers, params=params, timeout=10)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logging.error(f"unable to visit the site: {e}")
            return None

        soup = BeautifulSoup(response.text, 'html.parser')
        pager = soup.find_all(name='ul', class_='el-pager')
        if pager:
            # The last numbered <li> in the pager is the highest page number.
            total_pages = int(pager[0].find_all('li', class_='number')[-1].text)
        else:
            # No pager rendered -> results fit on a single page.
            total_pages = 1
        logging.info(f"Total pages found: {total_pages}")
        return total_pages

    def create_song_info_signature(self, current_page=1, timestamp=None):
        """Build the MD5 ``sign`` for the song-search API.

        Args:
            current_page: 1-based page number being requested.
            timestamp: Unix time to embed in the signed string.  Pass the
                exact value that will be sent as the ``timestamp`` query
                parameter; defaults to "now" for backward compatibility.

        Returns:
            str: hex MD5 digest of the ordered query string + secret.
        """
        if timestamp is None:
            timestamp = int(time.time())
        total_pages = self.get_total_pages()
        # Parameters are signed in alphabetical order; `pageNo` only enters
        # the signed string when there is more than one result page.  A failed
        # page lookup (None) is treated as a single page instead of raising
        # TypeError on `None > 1` as the previous version did.
        if total_pages is not None and total_pages > 1:
            r = f'appid=16073360&pageNo={current_page}&pageSize=20&timestamp={timestamp}&type=1&word={self.search_word}'
        else:
            r = f'appid=16073360&pageSize=20&timestamp={timestamp}&type=1&word={self.search_word}'
        r += self.secret
        return hashlib.md5(r.encode('utf-8')).hexdigest()

    def build_fetching_parameters(self, current_page=1):
        """Return the query parameters (including ``sign``) for the search API."""
        timestamp = int(time.time())
        # Sign with the exact timestamp being sent; letting the signing helper
        # compute its own could straddle a second boundary and produce a
        # signature that does not match the `timestamp` parameter.
        signature = self.create_song_info_signature(current_page, timestamp=timestamp)
        return {
            "word": self.search_word,
            "type": '1',
            "pageSize": '20',
            "pageNo": str(current_page),
            "appid": '16073360',
            "timestamp": timestamp,
            "sign": signature
        }

    def fetch_music_details(self, current_page=1):
        """Fetch one page of search results.

        Returns:
            pandas.DataFrame with columns music_name / singer / song_id /
            album_name / album_id, or None on a network / HTTP error.
        """
        url = f'{self.base_url}/v1/search'
        params = self.build_fetching_parameters(current_page)
        try:
            response = requests.get(url, headers=self.headers, params=params, timeout=10)
            if response.history:
                logging.info(f'Sites redirected: {response.history}')
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logging.error(f"Error in fetching music info: {e}")
            return None

        soup = BeautifulSoup(response.text, 'html.parser')
        song_box = soup.find_all(name='div', class_='song-box')
        singer_box = soup.find_all(name='div', class_='artist ellipsis')
        album_box = soup.find_all(name='div', class_='album ellipsis')

        music_details = []
        for song, singer_list, album in zip(song_box, singer_box, album_box):
            music_name = song.find('a').text
            # Song IDs look like ".../TXXXX" in the href; keep from 'T' onward.
            song_id = song.find('a').get('href')
            song_id = song_id[song_id.find('T'):]
            artists = singer_list.find_all(name='a')
            singer = ", ".join(a.get_text(strip=True) for a in artists)
            album_name = album.find('a').text
            # Album IDs look like ".../PXXXX" in the href; keep from 'P' onward.
            album_id = album.find('a').get('href')
            album_id = album_id[album_id.find('P'):]

            music_details.append({'music_name': music_name, 'singer': singer, 'song_id': song_id, 'album_name': album_name, "album_id": album_id})
        logging.info(f"Fetched {len(music_details)} songs from page {current_page}")
        return pd.DataFrame(music_details)

    def fetch_all_music_details(self):
        """Fetch every result page and return one concatenated DataFrame.

        Returns:
            pandas.DataFrame of all pages, or None when the page count could
            not be determined or no page was fetched successfully.
        """
        total_pages = self.get_total_pages()
        if total_pages is None:
            # Previously `range(1, None + 1)` raised TypeError here.
            logging.error("Could not determine total pages; aborting fetch.")
            return None
        all_music_details = []
        for current_page in range(1, total_pages + 1):
            logging.info(f"Fetching page {current_page}/{total_pages}")
            music_details = self.fetch_music_details(current_page=current_page)
            if music_details is not None:
                all_music_details.append(music_details)
            else:
                logging.warning(f"Failed to fetch music info for page {current_page}")
        if all_music_details:
            return pd.concat(all_music_details, ignore_index=True)
        return None

    def create_link_signature(self, song_id, secret=None, timestamp=None):
        """Build the MD5 ``sign`` for the track-link API.

        Args:
            song_id: the TSID of the track.
            secret: accepted for backward compatibility but ignored —
                ``self.secret`` is always used (matching the old behavior;
                the old required parameter also broke the internal caller,
                which passed only ``song_id``).
            timestamp: Unix time to embed; pass the value actually sent in
                the request (defaults to "now").

        Returns:
            str: hex MD5 digest of the ordered query string + secret.
        """
        if timestamp is None:
            timestamp = int(time.time())
        r = f'TSID={song_id}&appid=16073360&timestamp={timestamp}'
        r += self.secret
        return hashlib.md5(r.encode('utf-8')).hexdigest()

    def build_track_link_parameters(self, song_id):
        """Return the query parameters (including ``sign``) for the track-link API."""
        timestamp = int(time.time())
        # Bind the result; the previous version referenced an undefined name
        # `sign` below, raising NameError on every call.
        signature = self.create_link_signature(song_id, timestamp=timestamp)
        return {
            "appid": '16073360',
            "timestamp": timestamp,
            "sign": signature,
            "TSID": song_id
        }

    def get_mp3_download_url(self, song_id):
        """Resolve a TSID to a direct MP3 URL.

        Returns:
            str URL, or None when the request fails or the response carries
            no usable ``data.path`` field.
        """
        url = f'{self.base_url}/v1/song/tracklink'
        params = self.build_track_link_parameters(song_id)
        try:
            response = requests.get(url, params=params, timeout=10)
            response.raise_for_status()
            data = response.json()
            mp3_url = data.get('data', {}).get('path')
            if not mp3_url or not isinstance(mp3_url, str):
                logging.warning(f"No MP3 URL found for TSID: {song_id}")
                return None
            return mp3_url
        except requests.exceptions.RequestException as e:
            logging.error(f"Request exception occurred for TSID {song_id}: {e}")
        return None

    def save_to_file(self, file_name, song_id):
        """Download the MP3 for ``song_id`` and write it to ``file_name``.

        The file name is sanitized for characters that are invalid on common
        filesystems.  Best-effort: failures are logged, not raised.
        """
        mp3_url = self.get_mp3_download_url(song_id)
        file_name = re.sub(r'[\\/:*?"<>|]', '_', file_name)
        if not mp3_url:
            logging.warning("MP3 URL is empty. Nothing to write.")
            return

        try:
            response = requests.get(mp3_url, headers=self.headers, timeout=30)
            response.raise_for_status()
            with open(file_name, 'wb') as file:
                file.write(response.content)
            logging.info(f"Successfully downloaded MP3 to {file_name}")
        except Exception as e:
            logging.error(f"Failed to download MP3 to file: {e}")

if __name__ == "__main__":
    # Console-only logging for interactive runs.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[logging.StreamHandler()]
    )
    secret = "0b50b02fd0d73a9c4c8c3a781c30845f"
    search_word = input("请输入歌手名：")
    base_url = "https://music.91q.com"

    try:
        scraper = MusicScraper(search_word, base_url, secret)
        all_music_details = scraper.fetch_all_music_details()
        if all_music_details is not None:
            # Download each result; file names combine title and track id.
            for _, track in all_music_details.iterrows():
                tsid = track['song_id']
                target_name = f"{track['music_name']}_{tsid}.mp3"
                logging.info(f"Processing song: {track['music_name']} by {track['singer']}")
                scraper.save_to_file(target_name, tsid)
    except Exception as e:
        logging.error(f"Error occurred: {e}")