# Crawl lyrics by song id, write them to CSV files, and do some alignment
# processing between the original lyrics and their Chinese translation.

# NOTE: a ``from __future__`` import must be the very first statement in a
# module (after the docstring/comments); in the original it sat *after*
# other imports, which is a SyntaxError.  It is a no-op on Python 3, which
# this file targets (f-strings, urllib.request), but it is kept rather
# than deleted.
from __future__ import unicode_literals

import csv
import json
import operator
import os
import re
import shutil
import sys
import urllib.request
from functools import reduce

import requests
import simplejson
from lxml import etree

# Word-frequency accumulator; unused in the visible code — kept for
# compatibility with possible external users of this module.
word_stat = {}
# Filesystem encoding, available for decoding fetched pages.
sys_type = sys.getfilesystemencoding()

# Desktop Chrome user agent so music.163.com serves the regular page.
ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
headers = {
    'User-agent': ua
}
class CrawlerLyric:
    def __init__(self):
        self.author_name = ""

    def get_url_html(self, url):
        with requests.Session() as session:
            response = session.get(url, headers=headers)
            text = response.text
            html = etree.HTML(text)
        return html

    def get_url_json(self, url):

        with requests.Session() as session:
            response = session.get(url, headers=headers)
            text = response.text
            text_json = simplejson.loads(text)
        return text_json

    def parse_song_id(self, html):

        song_ids = html.xpath("//ul[@class='f-hide']//a/@href")
        song_names = html.xpath("//ul[@class='f-hide']//a/text()")
        self.author_name = html.xpath('//title/text()')
        song_ids = [ids[9:len(ids)] for ids in song_ids]
        return self.author_name, song_ids, song_names

    def parse_lyric(self, text_json):
        try:
            lyric = text_json.get('lrc').get('lyric')
            regex = re.compile(r'\[.*\]')
            final_lyric = re.sub(regex, '', lyric).strip()
            return final_lyric
        except AttributeError as k:
            print(k)
            pass

    def get_album(self, html):
        album_ids = html.xpath("//ul[@id='m-song-module']/li/p/a/@href")
        album_names = html.xpath("//ul[@id='m-song-module']/li/p/a/text()")
        album_ids = [ids.split('=')[-1] for ids in album_ids]
        return album_ids, album_names

    def get_top50(self, sing_id):
        url_singer = 'https://music.163.com/artist?id='+str(sing_id)  # 陈奕迅
        html_50 = self.get_url_html(url_singer)
        author_name, song_ids, song_names = self.parse_song_id(html_50)
        # print(author_name, song_ids, song_names)
        for song_id, song_name in zip(song_ids, song_names):
            url_song = 'http://music.163.com/api/song/lyric?' + 'id=' + str(song_id) + '&lv=1&kv=1&tv=-1'
            json_text = self.get_url_json(url_song)
            print(song_name)
            print(self.parse_lyric(json_text))
            print('-' * 30)

    def get_all_song_id(self, album_ids):

        with requests.Session() as session:
            all_song_ids, all_song_names = [], []
            for album_id in album_ids:
                one_album_url = "https://music.163.com/album?id="+str(album_id)
                response = session.get(one_album_url, headers=headers)
                text = response.text
                html = etree.HTML(text)
                album_song_ids = html.xpath("//ul[@class='f-hide']/li/a/@href")
                album_song_names = html.xpath("//ul[@class='f-hide']/li/a/text()")
                album_song_ids = [ids.split('=')[-1] for ids in album_song_ids]

                all_song_ids.append(album_song_ids)
                all_song_names.append(album_song_names)

        return all_song_ids, all_song_names

    def get_all_song_lyric(self,singer_id):
        album_url = "https://music.163.com/artist/album?id="+str(singer_id)+"&limit=150&offset=0"
        html_album = self.get_url_html(album_url)
        album_ids, album_names = self.get_album(html_album)
        all_song_ids, all_song_names = self.get_all_song_id(album_ids)
        all_song_ids = reduce(operator.add, all_song_ids)
        all_song_names = reduce(operator.add, all_song_names)
        print(all_song_ids)
        print(all_song_names)


def gethtml(url):
    """Fetch *url* and return the raw response body as bytes.

    The response is also echoed (decoded) to stdout for debugging.
    """
    # Context manager guarantees the connection is closed; the original
    # left the urlopen() response object open (resource leak).
    with urllib.request.urlopen(url) as page:
        html = page.read()
    print(html.decode())
    return html


def getmusic(html):
    """Pull every ``id=<digits>`` song reference out of raw page bytes."""
    # Anchors look like href="/song?id=12345"; slicing [12:-1] keeps the
    # "id=12345" part (drops the 'href="/song?' prefix and closing quote).
    pattern = re.compile(r'href="/song\?id=[0-9]{0,9}"')
    return [match[12:-1] for match in pattern.findall(html.decode())]



def write_csv(japanese_lyrics, chinese_lyrics, fliename,
              out_dir='/media/data3/twt/work/assignment/final/lyric'):
    """Write aligned Chinese/Japanese lyric lines to ``<out_dir>/<fliename>.csv``.

    When the two lists differ in length, try to align them: drop a single
    trailing extra line, otherwise drop all empty strings from both sides.
    If they still disagree, nothing is written (silent skip — the caller
    crawls best-effort).  The caller's lists are never mutated.

    Note: *fliename* (sic) and the parameter order are kept as-is for
    backward compatibility; *out_dir* defaults to the original hard-coded
    output directory.
    """
    # Work on copies so the alignment pops don't mutate caller state.
    chinese = list(chinese_lyrics)
    japanese = list(japanese_lyrics)

    if len(chinese) != len(japanese):
        # A single trailing blank line is the most common mismatch.
        if len(chinese) == len(japanese) + 1:
            chinese.pop()
        elif len(chinese) + 1 == len(japanese):
            japanese.pop()
        else:
            # Last resort: strip all empty strings from both sides.
            chinese = list(filter(None, chinese))
            japanese = list(filter(None, japanese))
            if len(chinese) != len(japanese):
                return

    with open(os.path.join(out_dir, f'{fliename}.csv'), 'w',
              newline='', encoding='utf-8') as csvfile:
        csvwriter = csv.writer(csvfile)

        # Header row.
        csvwriter.writerow(['Chinese', 'Japanese'])

        # One aligned (translation, original) pair per row.
        for chinese_line, japanese_line in zip(chinese, japanese):
            csvwriter.writerow([chinese_line, japanese_line])

def getlyricfromurl(str):
    """Fetch one song's lyric and its Chinese translation, write aligned CSV.

    *str* (sic — the parameter name is kept for backward compatibility,
    though it shadows the builtin) is the query fragment, e.g. ``'id=496869422'``.
    Returns silently when the song has no lyric or no translation.
    """
    query = str  # local alias so the builtin isn't shadowed further down
    data_json = gethtml('http://music.163.com/api/song/lyric?os=pc&' + query + '&lv=-1&kv=-1&tv=-1')
    data = json.loads(data_json)
    # Keep every character that is NOT a digit/bracket/dot/colon, i.e.
    # remove the [mm:ss.xx] time tags character by character.
    pattern = re.compile(r'[^0-9\[\]\.\:]')
    try:
        lyriclist = re.findall(pattern, data['lrc']['lyric'])
        tlyriclist = re.findall(pattern, data['tlyric']['lyric'])
    except (KeyError, TypeError):
        # Missing 'lrc'/'tlyric' key, or its value is None — the original
        # bare ``except:`` hid all errors; these two are the expected ones.
        return
    if tlyriclist == [] or tlyriclist == [' ']:  # no translation or no lyric
        return

    # Re-assemble the character lists into strings (commas in the lyric
    # itself are also removed — preserved original behaviour).
    str_lrc = ','.join(lyriclist).replace(',', '')
    tstr_lrc = ','.join(tlyriclist).replace(',', '')
    jp_list = str_lrc.split('\n')
    # Alignment: the Japanese lyric usually starts with lyricist/composer
    # (arranger) credit lines, while the Chinese translation only has a
    # "by:xxx" line, so the first real Japanese line is the one without a
    # credit marker.
    # idea: translation alignment may need surrounding context rather than
    # a single line at a time.
    # Drop the leading credit/blank lines; the ``jp_list and`` guard keeps
    # this from raising IndexError when every line is a credit (the
    # original would crash on such input).
    while jp_list and ("词" in jp_list[0] or "曲" in jp_list[0] or jp_list[0] == ''):
        jp_list.pop(0)

    ch_list = tstr_lrc.split('\n')
    # Drop the leading "by:xxx" translator credit.
    if ch_list and ch_list[0].startswith('by'):
        ch_list = ch_list[1:]
    write_csv(jp_list, ch_list, query)


if __name__ == '__main__':

    # ID = 496869422
    # getlyricfromurl('id='+str(ID))

    # Mapping of artist id -> list of song ids, produced by the spider step.
    with open("/media/data3/twt/work/assignment/final/codebase/Submit/Spider/artist2songID.json", "r", encoding="utf-8") as f:
        artist2songID = json.load(f)

    # Hand-picked artists with a larger share of Japanese songs.
    # target_artists = [159300, 33760539, 15229, 190527, 50901152, 159644, 1075075, 15800, 14270, 12263373, 35135807, 896913, 1193225, 14242, 15558, 12334348]
    # target_artists_women = [829001, 981185, 16152, 18122, 160154, 37231059, 16995, 13059968, 31464106, 160729, 16456, 17635, 1002089, 191316, 30616560, 1053279, 12062125, 1120106, 35488854]
    target_artists_comb = [33927412, 21132, 12390232, 21138, 15021101, 1087614, 1064050, 12676813, 18771, 22492, 20453, 13058010]

    # Set for O(1) membership tests (JSON keys are strings, hence int()).
    wanted = set(target_artists_comb)
    for artist_id, song_ids in artist2songID.items():
        if int(artist_id) in wanted:
            for song_id in song_ids:
                getlyricfromurl('id=' + song_id)