'''
Meituan (美团) restaurant-listing crawler.
'''
import requests
import execjs
from config import *
from urllib.parse import urlencode

def get_token(href):
    """Compute Meituan's anti-crawler ``_token`` parameter for *href*.

    Delegates to the ``get_token`` function defined in ``./_token.js``,
    executed through PyExecJS.

    :param href: the pre-signing URL string derived from the query params
    :return: the token string produced by the JS signer
    """
    # `with` guarantees the JS source file is closed even on error
    # (the original version leaked the file handle).
    with open('./_token.js', mode='r', encoding='utf-8') as f:
        js = f.read()

    ctx = execjs.compile(js)

    return ctx.call("get_token", href)

def get_index(city="沈阳"):
    """Fetch one page of Meituan's food-listing API for *city*.

    Builds the request parameters, derives the anti-crawler ``_token``
    via :func:`get_token`, performs the GET, and returns the response
    body text on HTTP 200, otherwise ``None``.

    :param city: city name used for the ``cityName`` query parameter
    :return: response body (str) on success, ``None`` on non-200 status
    """
    params = {
        "cityName": city,
        "cateId": "2",
        "areaId": "0",
        "sort": "",
        "dinnerCountAttrId": "",
        "page": "2",
        "userId": "",
        "uuid": "80e8b04b-0cfa-44aa-94b4-cb153148391e",
        "platform": "1",
        "partner": "126",
        "originUrl": "https://hf.meituan.com/meishi/c17/",
        "riskLevel": "1",
        "optimusCode": "10"
    }

    # String fed to the JS token signer.
    # NOTE(review): joining as key "&" value (no "=") looks wrong for a
    # query string, but the opaque signer in _token.js may expect exactly
    # this format — confirm against _token.js before changing it.
    href = "".join(k + "&" + v for k, v in params.items())

    href_data = domain["url"] + href

    params["_token"] = get_token(href_data)

    url = domain["url"] + urlencode(params)

    # timeout keeps the crawler from hanging forever on a stalled connection
    r = requests.get(url=url, headers=header, timeout=10)

    if r.status_code == 200:
        print(r.text)
        # Return the payload so callers (e.g. parse_index) can consume it;
        # the original printed it and discarded it.
        return r.text
    return None

def parse_index( html ):
    """Parse the payload returned by get_index.

    Stub — not implemented yet.
    """
    pass


if __name__ == '__main__':

    # Script entry point: crawl the default city ("沈阳" / Shenyang).
    get_index()
