#!/usr/bin/env python
# coding: utf-8

# In[46]:


import jieba
import pandas as pd 
import requests
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from pyecharts.charts import Line,Pie,Scatter,Bar,Map,Grid
from pyecharts.charts import WordCloud
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.globals import SymbolType
from pyecharts.commons.utils import JsCode
from pyecharts.charts import *

# In[29]:


# Load the attractions spreadsheet and do basic cleaning.
df = pd.read_excel(r'C:\Users\admin\Desktop\旅游景点.xlsx')
df.head()

# Drop rows with zero sales — they carry no signal for the charts below.
df = df[df['销量'] != 0]

# Fill missing star ratings with '未知' ("unknown").
# Assign the result instead of `df['星级'].fillna(..., inplace=True)`:
# inplace fillna on a column selection is chained assignment and raises a
# FutureWarning on modern pandas (and will stop working in pandas 3.0).
df['星级'] = df['星级'].fillna('未知')
df.isnull().sum()

# Fill any remaining missing cells in the whole frame.
df.fillna('未知', inplace=True)
df.isnull().sum()


# In[34]:


def xiaoliang20():
    """Horizontal bar chart of the 20 best-selling attractions."""
    # Bar gradient: orange on the left fading to green on the right.
    color_js = """new echarts.graphic.LinearGradient(0, 0, 1, 0,
    [{offset: 0, color: '#FF4500'}, {offset: 1, color: '#98F898'}], false)"""
    sort_info = df.sort_values(by='销量', ascending=True)
    b1 = (
        Bar()
        # Last 20 of the ascending sort == the 20 highest sellers.
        .add_xaxis(list(sort_info['名称'])[-20:])
        .add_yaxis('热门景点销量', sort_info['销量'].values.tolist()[-20:],
                   itemstyle_opts=opts.ItemStyleOpts(color=JsCode(color_js)))
        .reversal_axis()  # horizontal bars: attraction names on the y axis
        .set_global_opts(
            title_opts=opts.TitleOpts(title='热门景点销量数据'),
            yaxis_opts=opts.AxisOpts(name='景点名称'),
            xaxis_opts=opts.AxisOpts(name='销量'),
        )
        .set_series_opts(label_opts=opts.LabelOpts(position="right"))
    )
    # Wrap in a Grid and shift the plot right so long names are not clipped.
    g1 = (
        Grid()
        .add(b1, grid_opts=opts.GridOpts(pos_left='20%', pos_right='5%'))
    )
    return g1.render_notebook()


xiaoliang20()


# In[36]:


def jiaqichuxing_map():
    """China map of holiday-travel volume (total sales per city)."""
    df_tmp1 = df[['城市', '销量']]
    # NOTE(review): groupby(...).sum() yields a one-column DataFrame, so
    # .values.tolist() produces single-element lists as map values; pyecharts
    # tolerates this, but selecting the '销量' Series would be cleaner —
    # confirm the rendered map is unchanged before switching.
    df_counts = df_tmp1.groupby('城市').sum()
    m1 = (
        Map()
        .add('假期出行分布',
             [list(z) for z in zip(df_counts.index.values.tolist(),
                                   df_counts.values.tolist())],
             'china')
        .set_global_opts(
            title_opts=opts.TitleOpts(title='假期出行数据地图分布'),
            visualmap_opts=opts.VisualMapOpts(max_=100000, is_piecewise=False,
                                              range_color=["white", "#fa8072", "#ed1941"]),
        )
    )
    # Render exactly once: the original called m1.render() twice, writing the
    # output HTML file twice for no benefit.
    return m1.render()


jiaqichuxing_map()


# In[38]:


def xingjijingdian():
    """Rose (Nightingale) pie of 4A/5A attraction counts per city."""
    df_tmp2 = df[df['星级'].isin(['4A', '5A'])]
    # Count 4A/5A attractions per city, largest first.
    # (sort_values returns a new Series, so the original copy() + inplace
    # sort was redundant.)
    df0 = df_tmp2.groupby('城市').count()['星级'].sort_values(ascending=False)
    c1 = (
        Pie()
        .add('',
             [list(z) for z in zip(df0.index.values.tolist(), df0.values.tolist())],
             radius=['30%', '100%'],
             center=['50%', '60%'],
             rosetype='area',  # sector radius encodes the count
             )
        .set_global_opts(title_opts=opts.TitleOpts(title='各省市4A-5A景点数量玫瑰图'),
                         legend_opts=opts.LegendOpts(is_show=False),
                         toolbox_opts=opts.ToolboxOpts())
        .set_series_opts(label_opts=opts.LabelOpts(is_show=True, position='inside', font_size=12,
                                                   formatter='{b}: {c}', font_style='italic',
                                                   font_weight='bold', font_family='Microsoft YaHei'
                                                   ))
    )
    # Render once (the original called render_notebook() twice).
    return c1.render_notebook()


xingjijingdian()


# In[40]:


price_level = [0, 50, 100, 150, 200, 250, 300, 350, 400, 500]    
label_level = ['0-50', '50-100', '100-150', '150-200', '200-250', '250-300', '300-350', '350-400', '400-500']    
jzmj_cut = pd.cut(df['价格'], price_level, labels=label_level)        
df_price = jzmj_cut.value_counts()


# In[41]:


def jiage():
    """Scatter chart: number of attractions in each ticket-price bucket."""
    # Radial gradient for the scatter symbols (blue centre -> salmon edge).
    color_js = """new echarts.graphic.RadialGradient(
                    0.5, 0.5, 1,
                    [{offset: 0,
                      color: '#0000CD'},
                     {offset: 1,
                      color: '#FFA07A'}
                      ])"""
    s2 = (
        Scatter()
        .add_xaxis(df_price.index.tolist())
        .add_yaxis('门票价格区间', df_price.values.tolist(), symbol_size=50,
                   itemstyle_opts=opts.ItemStyleOpts(color=JsCode(color_js)))
        # Single set_global_opts call: the original called it twice, and the
        # second call resets options not passed (title/legend) to defaults.
        .set_global_opts(
            yaxis_opts=opts.AxisOpts(name='数量'),
            xaxis_opts=opts.AxisOpts(name='价格区间(元)'),
            visualmap_opts=opts.VisualMapOpts(is_show=False,
                                              # map the data value to symbol size
                                              type_='size',
                                              # symbol size range
                                              range_size=[5, 50]))
    )
    # Render once (the original called render_notebook() twice).
    return s2.render_notebook()


jiage()


# In[43]:


# ---------- 北京: scrape Qunar attraction listings and map the top sights ----------
# Listing pages 1-20 for the city.
urllst = ['https://travel.qunar.com/p-cs299914-beijing-jingdian-1-' + str(i)
          for i in range(1, 21)]

# Scrape every page; each <li> is one attraction record.
# (The original also fetched urllst[1] up front and parsed it twice before this
# loop; that exploratory duplicate work is removed — same final data, fewer requests.)
datai = []
for ui in urllst:
    r = requests.get(ui)
    soup = BeautifulSoup(r.text, 'lxml')
    ul = soup.find('ul', class_="list_item clrfix")
    for i in ul.find_all('li'):
        dic = {}
        dic['lat'] = i['data-lat']
        dic['lng'] = i['data-lng']
        dic['景点名称'] = i.find('span', class_="cn_tit").text
        dic['攻略提到数量'] = i.find('div', class_="strategy_sum").text
        dic['点评数量'] = i.find('div', class_="comment_sum").text
        dic['景点排名'] = i.find('span', class_="ranking_sum").text
        # Star rating is encoded as the inline-style width percentage.
        dic['星级'] = i.find('span', class_="total_star").find('span')['style'].split(':')[1]
        datai.append(dic)

df = pd.DataFrame(datai)

# Keep rows that actually carry a ranking, extract the numeric rank,
# and keep only ranks 1-9.
df['景点排名'] = df['景点排名'].replace('', np.nan)  # assignment avoids chained-inplace warning
df.dropna(subset=['景点排名'], inplace=True)
df['rn'] = df['景点排名'].apply(lambda x: int(re.sub(r"\D", "", x)))  # r"\D": "\D" is an invalid escape
dat = df.sort_values('rn')
db = dat[dat['rn'] < 10].reset_index(drop=True)

# Flatten the columns the Geo chart needs into plain lists.
fen = db['景点名称'].tolist()
lng = db['lng'].tolist()
lat = db['lat'].tolist()
value = db['点评数量'].tolist()


def beijingsandian():
    """Ripple-scatter Geo chart of Beijing's top-ranked attractions."""
    geo = Geo(init_opts=opts.InitOpts(theme='light',
                                      width='1000px',
                                      height='600px',
                                      bg_color='#EEEEE8'))
    # Configure the base map and chart options once, not on every loop pass
    # as the original did.
    geo.add_schema(maptype="北京")
    for i in range(len(db)):
        geo.add_coordinate(fen[i], lng[i], lat[i])  # register the custom point
        geo.add("", [(fen[i], value[i])], type_='effectScatter')  # ripple scatter ('heatmap' for a heat map)
    geo.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    geo.set_global_opts(title_opts=opts.TitleOpts(title="北京旅游景点涟漪散点图"),
                        visualmap_opts=opts.VisualMapOpts())
    # Render once (the original called render_notebook() twice).
    return geo.render_notebook()


beijingsandian()


# In[54]:


# ---------- 广州: scrape Qunar attraction listings and map the top sights ----------
# Listing pages 1-20 for the city.
urllst = ['https://travel.qunar.com/p-cs300132-guangzhou-jingdian-1-' + str(i)
          for i in range(1, 21)]

# Scrape every page; each <li> is one attraction record.
# (The original also fetched urllst[1] up front and parsed it twice before this
# loop; that exploratory duplicate work is removed — same final data, fewer requests.)
datai = []
for ui in urllst:
    r = requests.get(ui)
    soup = BeautifulSoup(r.text, 'lxml')
    ul = soup.find('ul', class_="list_item clrfix")
    for i in ul.find_all('li'):
        dic = {}
        dic['lat'] = i['data-lat']
        dic['lng'] = i['data-lng']
        dic['景点名称'] = i.find('span', class_="cn_tit").text
        dic['攻略提到数量'] = i.find('div', class_="strategy_sum").text
        dic['点评数量'] = i.find('div', class_="comment_sum").text
        dic['景点排名'] = i.find('span', class_="ranking_sum").text
        # Star rating is encoded as the inline-style width percentage.
        dic['星级'] = i.find('span', class_="total_star").find('span')['style'].split(':')[1]
        datai.append(dic)

df = pd.DataFrame(datai)

# Keep rows that actually carry a ranking, extract the numeric rank,
# and keep only ranks 1-9.
df['景点排名'] = df['景点排名'].replace('', np.nan)  # assignment avoids chained-inplace warning
df.dropna(subset=['景点排名'], inplace=True)
df['rn'] = df['景点排名'].apply(lambda x: int(re.sub(r"\D", "", x)))  # r"\D": "\D" is an invalid escape
dat = df.sort_values('rn')
db = dat[dat['rn'] < 10].reset_index(drop=True)

# Flatten the columns the Geo chart needs into plain lists.
fen = db['景点名称'].tolist()
lng = db['lng'].tolist()
lat = db['lat'].tolist()
value = db['点评数量'].tolist()


def guangzhousandian():
    """Ripple-scatter Geo chart of Guangzhou's top-ranked attractions."""
    geo = Geo(init_opts=opts.InitOpts(theme='light',
                                      width='1000px',
                                      height='600px',
                                      bg_color='#EEEEE8'))
    # Configure the base map and chart options once, not on every loop pass
    # as the original did.
    geo.add_schema(maptype="广州")
    for i in range(len(db)):
        geo.add_coordinate(fen[i], lng[i], lat[i])  # register the custom point
        geo.add("", [(fen[i], value[i])], type_='effectScatter')  # ripple scatter ('heatmap' for a heat map)
    geo.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    geo.set_global_opts(title_opts=opts.TitleOpts(title="广州旅游景点涟漪散点图"),
                        visualmap_opts=opts.VisualMapOpts())
    # Render once (the original called render_notebook() twice).
    return geo.render_notebook()


guangzhousandian()


# In[59]:


# ---------- 上海: scrape Qunar attraction listings and map the top sights ----------
# Listing pages 1-20 for the city.
urllst = ['https://travel.qunar.com/p-cs299878-shanghai-jingdian-1-' + str(i)
          for i in range(1, 21)]

# Scrape every page; each <li> is one attraction record.
# (The original also fetched urllst[1] up front and parsed it twice before this
# loop; that exploratory duplicate work is removed — same final data, fewer requests.)
datai = []
for ui in urllst:
    r = requests.get(ui)
    soup = BeautifulSoup(r.text, 'lxml')
    ul = soup.find('ul', class_="list_item clrfix")
    for i in ul.find_all('li'):
        dic = {}
        dic['lat'] = i['data-lat']
        dic['lng'] = i['data-lng']
        dic['景点名称'] = i.find('span', class_="cn_tit").text
        dic['攻略提到数量'] = i.find('div', class_="strategy_sum").text
        dic['点评数量'] = i.find('div', class_="comment_sum").text
        dic['景点排名'] = i.find('span', class_="ranking_sum").text
        # Star rating is encoded as the inline-style width percentage.
        dic['星级'] = i.find('span', class_="total_star").find('span')['style'].split(':')[1]
        datai.append(dic)

df = pd.DataFrame(datai)

# Keep rows that actually carry a ranking, extract the numeric rank,
# and keep only ranks 1-9.
df['景点排名'] = df['景点排名'].replace('', np.nan)  # assignment avoids chained-inplace warning
df.dropna(subset=['景点排名'], inplace=True)
df['rn'] = df['景点排名'].apply(lambda x: int(re.sub(r"\D", "", x)))  # r"\D": "\D" is an invalid escape
dat = df.sort_values('rn')
db = dat[dat['rn'] < 10].reset_index(drop=True)

# Flatten the columns the Geo chart needs into plain lists.
fen = db['景点名称'].tolist()
lng = db['lng'].tolist()
lat = db['lat'].tolist()
value = db['点评数量'].tolist()


def shanghaisandian():
    """Ripple-scatter Geo chart of Shanghai's top-ranked attractions."""
    geo = Geo(init_opts=opts.InitOpts(theme='light',
                                      width='1000px',
                                      height='600px',
                                      bg_color='#EEEEE8'))
    # Configure the base map and chart options once, not on every loop pass
    # as the original did.
    geo.add_schema(maptype="上海")
    for i in range(len(db)):
        geo.add_coordinate(fen[i], lng[i], lat[i])  # register the custom point
        geo.add("", [(fen[i], value[i])], type_='effectScatter')  # ripple scatter ('heatmap' for a heat map)
    geo.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    geo.set_global_opts(title_opts=opts.TitleOpts(title="上海旅游景点涟漪散点图"),
                        visualmap_opts=opts.VisualMapOpts())
    # Render once (the original called render_notebook() twice).
    return geo.render_notebook()


shanghaisandian()


# In[64]:


# ---------- 西安: scrape Qunar attraction listings and map the top sights ----------
# Listing pages 1-20 for the city.
urllst = ['https://travel.qunar.com/p-cs300100-xian-jingdian-1-' + str(i)
          for i in range(1, 21)]

# Scrape every page; each <li> is one attraction record.
# (The original also fetched urllst[1] up front and parsed it twice before this
# loop; that exploratory duplicate work is removed — same final data, fewer requests.)
datai = []
for ui in urllst:
    r = requests.get(ui)
    soup = BeautifulSoup(r.text, 'lxml')
    ul = soup.find('ul', class_="list_item clrfix")
    for i in ul.find_all('li'):
        dic = {}
        dic['lat'] = i['data-lat']
        dic['lng'] = i['data-lng']
        dic['景点名称'] = i.find('span', class_="cn_tit").text
        dic['攻略提到数量'] = i.find('div', class_="strategy_sum").text
        dic['点评数量'] = i.find('div', class_="comment_sum").text
        dic['景点排名'] = i.find('span', class_="ranking_sum").text
        # Star rating is encoded as the inline-style width percentage.
        dic['星级'] = i.find('span', class_="total_star").find('span')['style'].split(':')[1]
        datai.append(dic)

df = pd.DataFrame(datai)

# Keep rows that actually carry a ranking, extract the numeric rank,
# and keep only ranks 1-9.
df['景点排名'] = df['景点排名'].replace('', np.nan)  # assignment avoids chained-inplace warning
df.dropna(subset=['景点排名'], inplace=True)
df['rn'] = df['景点排名'].apply(lambda x: int(re.sub(r"\D", "", x)))  # r"\D": "\D" is an invalid escape
dat = df.sort_values('rn')
db = dat[dat['rn'] < 10].reset_index(drop=True)

# Flatten the columns the Geo chart needs into plain lists.
fen = db['景点名称'].tolist()
lng = db['lng'].tolist()
lat = db['lat'].tolist()
value = db['点评数量'].tolist()


def xiansandian():
    """Ripple-scatter Geo chart of Xi'an's top-ranked attractions."""
    geo = Geo(init_opts=opts.InitOpts(theme='light',
                                      width='1000px',
                                      height='600px',
                                      bg_color='#EEEEE8'))
    # Configure the base map and chart options once, not on every loop pass
    # as the original did.
    geo.add_schema(maptype="西安")
    for i in range(len(db)):
        geo.add_coordinate(fen[i], lng[i], lat[i])  # register the custom point
        geo.add("", [(fen[i], value[i])], type_='effectScatter')  # ripple scatter ('heatmap' for a heat map)
    geo.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    geo.set_global_opts(title_opts=opts.TitleOpts(title="西安旅游景点涟漪散点图"),
                        visualmap_opts=opts.VisualMapOpts())
    # Render once (the original called render_notebook() twice).
    return geo.render_notebook()


xiansandian()


# In[69]:


# ---------- 丽江: scrape Qunar attraction listings and map the top sights ----------
# Listing pages 1-20 for the city.
urllst = ['https://travel.qunar.com/p-cs300079-lijiang-jingdian-1-' + str(i)
          for i in range(1, 21)]

# Scrape every page; each <li> is one attraction record.
# (The original also fetched urllst[1] up front and parsed it twice before this
# loop; that exploratory duplicate work is removed — same final data, fewer requests.)
datai = []
for ui in urllst:
    r = requests.get(ui)
    soup = BeautifulSoup(r.text, 'lxml')
    ul = soup.find('ul', class_="list_item clrfix")
    for i in ul.find_all('li'):
        dic = {}
        dic['lat'] = i['data-lat']
        dic['lng'] = i['data-lng']
        dic['景点名称'] = i.find('span', class_="cn_tit").text
        dic['攻略提到数量'] = i.find('div', class_="strategy_sum").text
        dic['点评数量'] = i.find('div', class_="comment_sum").text
        dic['景点排名'] = i.find('span', class_="ranking_sum").text
        # Star rating is encoded as the inline-style width percentage.
        dic['星级'] = i.find('span', class_="total_star").find('span')['style'].split(':')[1]
        datai.append(dic)

df = pd.DataFrame(datai)

# Keep rows that actually carry a ranking, extract the numeric rank,
# and keep only ranks 1-9.
df['景点排名'] = df['景点排名'].replace('', np.nan)  # assignment avoids chained-inplace warning
df.dropna(subset=['景点排名'], inplace=True)
df['rn'] = df['景点排名'].apply(lambda x: int(re.sub(r"\D", "", x)))  # r"\D": "\D" is an invalid escape
dat = df.sort_values('rn')
db = dat[dat['rn'] < 10].reset_index(drop=True)

# Flatten the columns the Geo chart needs into plain lists.
fen = db['景点名称'].tolist()
lng = db['lng'].tolist()
lat = db['lat'].tolist()
value = db['点评数量'].tolist()


def lijiangsandian():
    """Ripple-scatter Geo chart of Lijiang's top-ranked attractions."""
    geo = Geo(init_opts=opts.InitOpts(theme='light',
                                      width='1000px',
                                      height='600px',
                                      bg_color='#EEEEE8'))
    # Configure the base map and chart options once, not on every loop pass
    # as the original did.
    geo.add_schema(maptype="丽江")
    for i in range(len(db)):
        geo.add_coordinate(fen[i], lng[i], lat[i])  # register the custom point
        geo.add("", [(fen[i], value[i])], type_='effectScatter')  # ripple scatter ('heatmap' for a heat map)
    geo.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    geo.set_global_opts(title_opts=opts.TitleOpts(title="丽江旅游景点涟漪散点图"),
                        visualmap_opts=opts.VisualMapOpts())
    # Render once (the original called render_notebook() twice).
    return geo.render_notebook()


lijiangsandian()


# In[79]:


# ---------- 重庆: scrape Qunar attraction listings and map the top sights ----------
# Listing pages 1-20 for the city.
urllst = ['https://travel.qunar.com/p-cs299979-chongqing-jingdian-1-' + str(i)
          for i in range(1, 21)]

# Scrape every page; each <li> is one attraction record.
# (The original also fetched urllst[1] up front and parsed it twice before this
# loop; that exploratory duplicate work is removed — same final data, fewer requests.)
datai = []
for ui in urllst:
    r = requests.get(ui)
    soup = BeautifulSoup(r.text, 'lxml')
    ul = soup.find('ul', class_="list_item clrfix")
    for i in ul.find_all('li'):
        dic = {}
        dic['lat'] = i['data-lat']
        dic['lng'] = i['data-lng']
        dic['景点名称'] = i.find('span', class_="cn_tit").text
        dic['攻略提到数量'] = i.find('div', class_="strategy_sum").text
        dic['点评数量'] = i.find('div', class_="comment_sum").text
        dic['景点排名'] = i.find('span', class_="ranking_sum").text
        # Star rating is encoded as the inline-style width percentage.
        dic['星级'] = i.find('span', class_="total_star").find('span')['style'].split(':')[1]
        datai.append(dic)

df = pd.DataFrame(datai)

# Keep rows that actually carry a ranking, extract the numeric rank,
# and keep only ranks 1-9.
df['景点排名'] = df['景点排名'].replace('', np.nan)  # assignment avoids chained-inplace warning
df.dropna(subset=['景点排名'], inplace=True)
df['rn'] = df['景点排名'].apply(lambda x: int(re.sub(r"\D", "", x)))  # r"\D": "\D" is an invalid escape
dat = df.sort_values('rn')
db = dat[dat['rn'] < 10].reset_index(drop=True)

# Flatten the columns the Geo chart needs into plain lists.
fen = db['景点名称'].tolist()
lng = db['lng'].tolist()
lat = db['lat'].tolist()
value = db['点评数量'].tolist()


def chongqingsandian():
    """Ripple-scatter Geo chart of Chongqing's top-ranked attractions."""
    geo = Geo(init_opts=opts.InitOpts(theme='light',
                                      width='1000px',
                                      height='600px',
                                      bg_color='#EEEEE8'))
    # Configure the base map and chart options once, not on every loop pass
    # as the original did.
    geo.add_schema(maptype="重庆")
    for i in range(len(db)):
        geo.add_coordinate(fen[i], lng[i], lat[i])  # register the custom point
        geo.add("", [(fen[i], value[i])], type_='effectScatter')  # ripple scatter ('heatmap' for a heat map)
    geo.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    geo.set_global_opts(title_opts=opts.TitleOpts(title="重庆旅游景点涟漪散点图"),
                        visualmap_opts=opts.VisualMapOpts())
    # Render once (the original called render_notebook() twice).
    return geo.render_notebook()


chongqingsandian()


# In[ ]:




