from bs4 import BeautifulSoup
import requests

# Request headers that mimic a desktop Chrome browser so dianping.com
# serves the regular shop page instead of blocking the scraper.
# The Cookie value was captured from a real browser session.
dp_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
    'Accept': 'image/webp,image/*,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'Referer': 'http://www.dianping.com/',
    'Cookie': '_lxsdk_cuid=1606c56b193c8-0b44ee210d23da-5d4e211f-100200-1606c56b194c8; _lxsdk=1606c56b193c8-0b44ee210d23da-5d4e211f-100200-1606c56b194c8; _hc.v=39c11442-a651-baec-f19d-0473a4e3b7c5.1513646109; s_ViewType=10; _lxsdk_s=1606c56b196-f57-eb7-004%7C%7C8',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
}
# Fetch one dianping.com shop page and print the shop's name.
url = 'http://www.dianping.com/shop/77439979'
res = requests.get(url, headers=dp_headers, timeout=3)
# Fail fast on 4xx/5xx instead of silently parsing an error page.
res.raise_for_status()
soup = BeautifulSoup(res.text, 'lxml')

shop_name_tag = soup.find(class_='shop-name')
# The element text looks like "<name> <extras>"; the first
# whitespace-separated token is the shop name itself. Guarding with
# `if shop_name_tag else []` also covers a present-but-empty tag,
# which would previously raise an unhandled IndexError.
tokens = shop_name_tag.get_text().split() if shop_name_tag else []
if not tokens:
    raise Exception('店铺名称爬取失败，店铺地址：%s' % url)
shop_name = tokens[0]  # keep only the shop name, drop rating/extras
print(shop_name)
