import requests
from pprint import pprint


class Crawler:
    """Crawl song search results from qqwtt.com and normalize them.

    The site is queried with an AJAX-style POST; results come back as JSON
    with the payload under the 'data' key.
    """

    def __init__(self, input_, type_, filter_):
        # input_:  search keyword (e.g. a song name)
        # type_:   source site identifier (e.g. 'netease')
        # filter_: field the site filters on (e.g. 'name')
        self.input_ = input_
        self.type_ = type_
        self.filter_ = filter_

    def crawl(self, page):
        """POST a search request for *page* and return the raw 'data' list.

        Raises requests.HTTPError on non-2xx responses and
        requests.Timeout if the server does not answer within 10s.
        """
        url = f'https://www.qqwtt.com?name={self.input_}&type={self.type_}'
        data = {
            'input': self.input_,
            'filter': self.filter_,
            'type': self.type_,
            'page': page,
        }
        # x-requested-with marks this as an AJAX call, which the endpoint expects
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54',
            'x-requested-with': 'XMLHttpRequest'
        }
        # timeout: without it requests can block forever on a dead server;
        # raise_for_status: fail loudly on HTTP errors instead of choking on
        # a non-JSON error page in .json()
        response = requests.post(url, data=data, headers=headers, timeout=10)
        response.raise_for_status()
        results = response.json()
        return results['data']

    def parse(self, data):
        """Map each raw result dict to the normalized song schema.

        Missing source keys become None (via dict.get).
        """
        return [{'author': one_data.get('author'),
                 'mp3_url': one_data.get('url'),
                 'title': one_data.get('title'),
                 'song_id': one_data.get('songid')}
                for one_data in data]

    def crawler_main(self, page):
        """Crawl one result page and return the parsed (normalized) list."""
        return self.parse(self.crawl(page))


if __name__ == '__main__':
    # Demo run: search NetEase for 'wake' by name and fetch the first page.
    music_crawler = Crawler(input_='wake', type_='netease', filter_='name')
    music_crawler.crawler_main(page=1)
