# coding:utf-8

import time
from urllib import request

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Pool of browser User-Agent strings to rotate across requests
# (helps avoid trivial bot detection by the target site).
user_agents = [
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 "
    "Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
]

# Proxy sources:
#   Xici free proxies: http://www.xicidaili.com/
#   Zhima HTTP (fast HTTP proxy service): http://h.zhimaruanjian.com/
# NOTE(review): proxy_list is not referenced anywhere in this file —
# confirm it is still needed before removing.
proxy_list = [
    {'http': '183.15.172.23:61430'},
    {'http': '114.34.58.46:58619'},
    {'http': '45.115.175.20:35308'},
    {'https': '119.119.111.162:4675'},
]

# URL template for NetEase Cloud Music's "hot playlists" listing;
# the two %s placeholders are (category, offset), 35 playlists per page.
hot_url = u'https://music.163.com/#/discover/playlist/?order=hot&cat=%s&limit=35&offset=%s'


# def get_song_list_by_cat(cat):
#     offset = 0
#     song_list = []
#     try_times = 0
#     while True:
#         time.sleep(10)
#         print('开始', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
#         url = hot_url % (cat, offset)
#         try:
#             req = request.Request(url, headers={'User-Agent': random.choice(user_agents)})
#             html = request.urlopen(req).read().decode()
#             print(html)
#         except (request.HTTPError, request.URLError) as e:
#             print(e)
#             continue
#         soup = BeautifulSoup(html, 'html.parser')
#         list_soup = soup.find('ul', {'class': 'm-cvrlst f-cb'})
#         if list_soup is None and try_times < 200:
#             try_times += 1
#             # continue
#             break
#         elif list_soup is None or len(list_soup) <= 1:
#             break
#         print(list_soup)


def get_song_list_by_cat(cat, page,
                         driver_path=r'C:\Users\yuyan\Desktop\chromedriver_win32\chromedriver.exe'):
    """Scrape NetEase Cloud Music hot playlists for one category.

    Args:
        cat: playlist category string substituted into ``hot_url``.
        page: last page index (0-based) to download; 35 playlists per page.
        driver_path: path to the chromedriver executable.  Parameterized so
            the scraper is not hard-wired to one machine; the default keeps
            the previous behavior.

    Returns:
        List of dicts with keys 'name', 'creator', 'url', 'view_counts'.
    """
    song_list = []
    print('开始', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    # Create ONE headless browser for the whole run: the original built a new
    # Chrome per loop iteration and never called quit(), leaking a browser
    # process per page.
    # NOTE(review): executable_path= is deprecated in Selenium 4 (use
    # Service); kept for compatibility with the version this file targets.
    options = Options()
    options.add_argument('--headless')
    driver = webdriver.Chrome(executable_path=driver_path, options=options)
    try:
        offset = 0
        failures = 0
        while True:
            time.sleep(10)  # throttle requests to avoid being blocked
            print('当前下载页: %d' % (offset // 35))
            url = hot_url % (cat, offset)
            try:
                driver.get(url)
                # The playlist markup lives inside the 'contentFrame' iframe.
                driver.switch_to.frame('contentFrame')
                html = driver.page_source
            except Exception as e:
                # The original caught urllib's HTTPError/URLError, which
                # selenium never raises, so any WebDriverException escaped
                # and leaked the driver; it also retried forever.  Log,
                # retry a bounded number of times, then give up.
                print(e)
                failures += 1
                if failures >= 3:
                    break
                continue
            failures = 0  # reset after a successful fetch
            soup = BeautifulSoup(html, 'html.parser')
            list_soup = soup.find('ul', {'class': 'm-cvrlst f-cb'})
            # A missing or (near-)empty list means there are no more results.
            # (The original's try_times counter was dead code: both branches
            # of its if/elif broke out of the loop anyway.)
            if list_soup is None or len(list_soup) <= 1:
                break
            for song in list_soup.find_all('li'):
                song_url = song.find_all('img', class_='j-flag')[0]['src']
                song_name = song.find_all('a', class_='msk')[0]['title']
                song_view_counts = song.find_all('span', class_='nb')[0].string
                song_creator = song.find_all('a', class_='nm')[0]['title']
                song_list.append(
                    {'name': song_name, 'creator': song_creator,
                     'url': song_url, 'view_counts': song_view_counts}
                )
            print(song_list)
            # db_helper.save_song_to_db(song_list)
            if (offset // 35) >= page:
                break
            offset += 35
    finally:
        driver.quit()  # always release the browser process
    return song_list
