# -*- coding: utf-8 -*-
import json, sys, urllib, urllib2, gzip, StringIO, re, os, time, threading, socket, base64
from bs4 import BeautifulSoup

class Api:
    def __init__(self):
        pass

    def postHttpData(self, url, data, cookie=None):
        httpdata = None
        # try:
        req = urllib2.Request(url)
        req.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.157 Safari/537.36")
        req.add_header("Accept", "*/*")
        req.add_header("Content-Type", "application/x-www-form-urlencoded")
        req.add_header("Accept-Encoding", "gzip,deflate")
        req.add_header("Accept-Language", "en-us,en;q=0.8")
        req.add_header("Cookie", "os=pc")
        req.add_header("Cookie", "appver=1.9.1.103164")
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
        response = opener.open(req, data)
        httpdata = response.read()
        if response.headers.get('content-encoding', None) == 'gzip':
            httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()
        response.close()
        httpdata = json.loads(httpdata)
        if len(httpdata) == 0:
            return None

        return httpdata

    def getHttpData(self, url, cookie=None):
        # log('Frech: ' + url)
        try:
            req = urllib2.Request(url)
            req.add_header("User-Agent",
                           "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.157 Safari/537.36")
            req.add_header("Accept", "*/*")
            req.add_header("Content-Type", "application/x-www-form-urlencoded")
            req.add_header("Accept-Encoding", "gzip,deflate")
            req.add_header("Accept-Language", "en-us,en;q=0.8")
            req.add_header("Cookie", "os=pc")
            req.add_header("Cookie", "appver=1.9.1.103164")
            if cookie is not None:
                req.add_header("Cookie", cookie)
                print "Cookie=" + cookie

            response = urllib2.urlopen(req)
            httpdata = response.read()
            if response.headers.get('content-encoding', None) == 'gzip':
                httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()
            response.close()
            httpdata = json.loads(httpdata)
            if len(httpdata) == 0:
                return None
            return httpdata
        except:
            return None

    def getHtml(self, url):
        print url
        html = urllib2.urlopen(url).read()
        return html

    def getHot(self):
        data = "params=0BD8BB39A78692F1744DEFF63EBC30F7D84D90A907564A2A0BDA7895CF178DFD0CAEEC47033967A62CD4ECD51913FE164C3556D7EFCBD3AC00931DABF2730605BC25D19B433569DA9ABCB037586733BEED9E1BCA3A033E395BCA409263DD0B58C98E2D3B4B0B2EDA2B14D0109B0139B573A95B97CCCAE269A9322C0EAAFF65C6C05795F728A8A66B3AA3316B7539DCAB1776DE31E2BEAC1FE7A8C322B8B18231E31566BFCFBDCCF0557EF71AE97B7D2ECCAD7053DB1E18FC8DBCBFC7DDA36FC8EDF912F350DB61DB43924776FE9729C3D2F2C2BB33F639DD8A005859D642BCB0491C1931BE98A20E9132C91831B5CE8D1592EFDBED2FA4BB612DD34C3BE69C1C8669C1DCE2A45A7DACAFA0C320D13BB2EB40E36C10F4AD0CADF24B3F5C40D40F152F32E6041176216F221644F252FAB3E073B57F8D23D35549EF922F329ADFDAACB78BE9DFF1AEF75833F700230F1E12EBCA6B9B346C712AF8691BF1C4CBDCAEE5219A0783DC8BFCEC59297BAC74057B621C16B73A41192D42970C8AD7E2F0662E5E4F2983FE74A04E3331EFFC113B3A67CABC388CC11D42ABA76739691BFBF17732665441D1CFFB12AEDF0639D8ADCFC1C854A38162C4E3DE2277D7C8D5EF75550BB6311A3C54454992CA133C2786E140224611DFCD0C86D740FE0A1048D78B34AA69385D5DBFD5F1F9BD9730F86467248E82087990ECB5C729BDC88FE3FCE6E9E693F6CB81A53455E36A7F4AAB6BF8E5EA77E69589A032FDBDB13659D0E2D7D59643516766877A7251090C3985E493F181E25EACF495E1883EECF1566FCD98296D3ACB7471D7A7458909B7B9795E37%00"
        url = "http://music.163.com/eapi/batch"
        respData = self.postHttpData(url, data)
        return respData

    def getPlaylist(self, cat="全部", order="hot", offset=0, limit=35):
        url = "http://music.163.com/discover/playlist/" + "?order=" + order + "&cat=" + cat + "&limit=" + str(limit) + "&offset=" + str(offset)
        html = self.getHtml(url)
        if html != None:
            playlist = {"code": 200, "playlists": []}
            soup = BeautifulSoup(html)
            liArr = soup.select("#m-pl-container > li")
            for li in liArr:
                img = li.img["src"]
                title = li.select(".msk")[0]["title"]
                id = li.select(".icon-play")[0]["data-res-id"]
                nb = li.select(".nb")[0].string
                playlist["playlists"].append({"name": title, "coverImgUrl": img, "id": id, "nb": nb})
            return playlist

    def getPlaylistDetail(self, id):
        url = "http://music.163.com/api/playlist/detail?id=" + id + "&updateTime=-1"
        data = self.getHttpData(url)
        return data

    def getToplist(self, id):
        url = "http://music.163.com/discover/toplist?id=" + str(id)
        html = self.getHtml(url)
        if html != None:
            startTag = '<textarea style="display:none;">'
            endTag = '</textarea>'
            start = html.index(startTag) + len(startTag)
            end = html.index(endTag)
            if end > start:
                print "start=" + str(start) + ",end=" + str(end)
                data = html[start:end]
                respData = json.loads(data)
                return respData
        return None

# Module-level singleton; importers use this shared instance directly.
api = Api()
