# coding=utf-8
import importlib
import sys
importlib.reload(sys)
import urllib.parse
from bs4 import BeautifulSoup
from folium.plugins import HeatMap
import webbrowser
import json
import urllib3
import urllib
import pymongo
import pandas as pd
from matplotlib import pyplot as plt
import json
import folium
## MongoDB helper functions
from wordcloud import WordCloud,STOPWORDS
def createFortunedb():
    """Create handles for the fortunedb database and its ChinaRank2018 collection.

    MongoDB creates databases/collections lazily on first write, so this only
    builds the handles.  Returns the collection so callers can actually use it
    (the original built the handles and silently dropped them).
    """
    myClient = pymongo.MongoClient("127.0.0.1", 27017)
    fortunedb = myClient['fortunedb']
    return fortunedb['ChinaRank2018']

def getConn(dbName, colName):
    """Return a handle to collection *colName* in database *dbName* on the local MongoDB."""
    client = pymongo.MongoClient("127.0.0.1", 27017)
    return client[dbName][colName]

def getChinaFortuneRank(url):
    """Download the China Fortune-500 ranking page at *url* and persist every
    table row (<tr> inside <tbody>) into fortunedb.ChinaRank2018."""
    pool = urllib3.PoolManager()
    resp = pool.request("GET", url)
    print(resp.status)
    html = resp.data.decode('utf-8')
    print(html)
    rows = BeautifulSoup(html, "lxml").find("tbody").find_all("tr")
    collection = getConn("fortunedb", "ChinaRank2018")
    for row in rows:
        saveRankData(collection, row.find_all("td"))

def getCompanyLink(url, headers):
    """Walk the ranking table at *url* and scrape the detail page of every
    company past the manual resume point, via getCompanyData().

    Bug fix: the original called ``http.request("GET", url, headers)``, which
    passes *headers* positionally as urllib3's ``fields`` argument (request
    parameters) — the custom headers were never actually sent.  They are now
    passed as ``headers=headers``, matching getCompanyData().
    """
    http = urllib3.PoolManager()
    page = http.request("GET", url, headers=headers)
    print(page.status)
    print(page.data.decode('utf-8'))
    page = page.data.decode('utf-8')
    soup = BeautifulSoup(page, "lxml")
    companys = soup.find("tbody").find_all("tr")
    rootWebside = "http://www.fortunechina.com/"
    # count > 72 skips rows already scraped in an earlier run (manual resume
    # point).  Note count is incremented BEFORE the test, so the first table
    # row is seen as count == 2.
    count = 1
    for company in companys:
        count += 1
        if count > 72:
            print(count)
            vals = company.find_all("td")
            name = vals[2].get_text().strip()
            # the href's first 12 characters duplicate the site root already
            # stored in rootWebside, so they are dropped
            postfix = vals[2].find("a")['href'][12:]
            cwebside = rootWebside + postfix
            print(cwebside)
            getCompanyData(cwebside, headers, name)

def saveRankData(col, vals):
    """Insert one ranking table row into *col*.

    *vals* is the list of <td> cells: vals[0] rank, vals[2] company link
    (its <a> text is the name), vals[3] income, vals[4] profit.  Numeric
    cells are stored as floats; the new document's id is printed.
    """
    record = {
        "year": 2018,
        "rank": float(vals[0].string),
        "name": vals[2].a.string,
        "income": float(vals[3].string),
        "profit": float(vals[4].string),
    }
    result = col.insert_one(record)
    print(result.inserted_id)

def getRank(dbName, colName, fieldName):
    """Print every document of dbName.colName ordered by *fieldName*
    (descending), one line per company: position, name, income, profit."""
    cursor = getConn(dbName, colName).find().sort(fieldName, -1)
    for position, doc in enumerate(cursor, start=1):
        print(position, doc['name'], doc['income'], doc['profit'])
# Fetch a company's profile (address, officers, financials) from its detail page
def getCompanyData(url, headers, name):
    """Scrape one company detail page and store its profile in
    fortunedb.ChinaCompany2018 via saveCompanyData().

    *name* is the company name taken from the ranking table.  The page's
    'thisyeardata' <span class='txt-14'> cells hold address/officer/financial
    fields; the first left-aligned <td> holds the stock-listing string, whose
    first 5 characters (a label) are stripped off.

    Cleanup: the unused ``earningInfo`` select and a long block of
    commented-out debug prints were removed.
    """
    http = urllib3.PoolManager()
    page = http.request("GET", url, headers=headers)
    page = page.data.decode("utf-8")
    soup = BeautifulSoup(page, "lxml")
    companyInfo = soup.find("div", class_="thisyeardata").find_all("span", class_='txt-14')
    # listing venue + ticker, e.g. "...:600028" after dropping the 5-char label
    store = soup.select('td[align="left"]')[0].get_text().strip()[5:]
    col = getConn("fortunedb", "ChinaCompany2018")
    saveCompanyData(col, store, companyInfo, name)

def _fortuneNum(tag):
    """Parse one numeric cell from the company page: strip whitespace and
    thousands separators; the site's missing-value marker '--' becomes 0.0."""
    text = tag.string.strip().replace(',', '')
    return 0.0 if text == '--' else float(text)

def saveCompanyData(col, store, companyInfo, name):
    """Build the company profile document and insert it into *col*.

    *companyInfo* is the list of 'txt-14' spans from the detail page; the
    indices below follow the page's fixed layout (2 address, 4 chairman,
    6 postcode, 8 staff, 14 revenue, 17 profit, 20 net asset, 23 asset,
    26 market value).

    Robustness fix: the site renders any missing value as '--'.  The original
    only special-cased the staff field and crashed with ValueError when any
    other numeric field was '--'; all numeric fields now store 0.0 for '--'.
    """
    CompanyDict = {
        "name": name,
        "address": companyInfo[2].string.strip(),
        "chairman": companyInfo[4].string.strip(),
        "postcode": companyInfo[6].string.strip(),
        "staff": _fortuneNum(companyInfo[8]),
        "revenue": _fortuneNum(companyInfo[14]),
        "profit": _fortuneNum(companyInfo[17]),
        "netasset": _fortuneNum(companyInfo[20]),
        "asset": _fortuneNum(companyInfo[23]),
        "marketvalue": _fortuneNum(companyInfo[26]),
        "store": store,
        "year": 2018,
    }
    print(CompanyDict)
    col.insert_one(CompanyDict)

#get the size of every industry
def getindustry(url, headers):
    """Fetch the industry overview page and return its industry-name cells
    (<td class='f500c2'> inside the 'rankingtable' table)."""
    resp = urllib3.PoolManager().request("GET", url, headers=headers)
    soup = BeautifulSoup(resp.data.decode("utf-8"), "lxml")
    table = soup.find("table", class_='rankingtable')
    return table.find_all("td", class_='f500c2')

def addCompanyIndustryVal(url, headers, year):
    """For every industry on the overview page at *url*, store the industry in
    ChinaIndustry2018 and its member companies in ChinaIndustryCompany2018.

    Fixes: the industry document now records the *year* argument (the original
    hard-coded 2018 even though *year* was received and forwarded to
    getIndustryCompany), and the two collection handles are created once
    instead of once per loop iteration.
    """
    industrys = getindustry(url, headers)
    # industry links on the page are relative to this date-stamped path
    prefix = "http://www.fortunechina.com/fortune500/c/2018-07/10/"
    industryCol = getConn('fortunedb', 'ChinaIndustry2018')
    memberCol = getConn('fortunedb', "ChinaIndustryCompany2018")
    for industry in industrys:
        indusurl = prefix + industry.find("a")['href'].strip()
        print(indusurl)
        industryName = industry.string.strip()
        # save industry information
        industryCol.insert_one({"industry": industryName, "year": year})
        # save the industry's member companies
        getIndustryCompany(indusurl, industryName, memberCol, headers, year)

#get every industry's company list
def getIndustryCompany(url, industry, col, headers, year):
    """Scrape one industry page, save every member company into *col*, and
    return (company cells, industry) for callers that want the raw tags."""
    resp = urllib3.PoolManager().request("GET", url, headers=headers)
    soup = BeautifulSoup(resp.data.decode("utf-8"), "lxml")
    cells = soup.find("table", class_='rankingtable').find_all('td', class_='f500c3')
    for cell in cells:
        print(cell.string.strip())
        saveIndustryCompany(col, cell, industry, year)
    return cells, industry

def saveIndustryCompany(col, company, industry, year):
    """Insert one (industry, company name, year) relation document into *col*."""
    relation = {"industry": industry, "name": company.string.strip(), "year": year}
    col.insert_one(relation)

#set the request's headers
# Host = "www.fortunechina.com"
User_Agent_Phone = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"
User_Agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
Accept = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
Connection = "keep-alive"
# NOTE(review): Referer is defined but not included in `headers` below — add it
# if the site starts rejecting referer-less requests.
Referer = "http://www.fortunechina.com/fortune500/node_4302.htm"
headers = {"User-Agent": User_Agent, "Accept": Accept, "Connection": Connection}


# single china company url
url_company = "http://www.fortunechina.com/china500/29/2018a"
# full china company ranking url
url = "http://www.fortunechina.com/fortune500/c/2018-07/10/content_309961.htm"
# industry overview url
url_industry = "http://www.fortunechina.com/fortune500/c/2018-07/10/content_309960.htm"
# company list of a single industry
url_industry_companylist = "http://www.fortunechina.com/fortune500/c/2018-07/10/content_310067.htm"

# scrape the ranking table into fortunedb.ChinaRank2018
getChinaFortuneRank(url)

# BUG FIX: the original called getCompanyData(url, headers), which raises
# TypeError (the required *name* argument is missing) and passes the ranking
# list page instead of a company detail page.  getCompanyLink walks the list
# and invokes getCompanyData with the correct per-company URL and name.
getCompanyLink(url, headers)

######################################################################################################
#upload the data in mongodb by pandas
def loaddata(query, dbName, dbColumn):
    """Return the documents matching *query* from dbName.dbColumn as a
    pandas DataFrame.  Pass '' as *query* to fetch the whole collection."""
    collection = getConn(dbName, dbColumn)
    cursor = collection.find(query) if query != '' else collection.find()
    return pd.DataFrame(list(cursor))

# Pull both 2018 collections out of MongoDB and join each company profile with
# its industry label (left join keeps companies that have no industry row).
chinaCompany2018 = loaddata('', 'fortunedb', 'ChinaCompany2018')
chinaIndustryCompany2018 = loaddata('', 'fortunedb', 'ChinaIndustryCompany2018')
chinaCompany2018 = chinaCompany2018.merge(chinaIndustryCompany2018, on='name', how='left')

# ranking views, best first
profitRank2018 = chinaCompany2018.sort_values(by='profit', ascending=False)
revenueRank2018 = chinaCompany2018.sort_values(by='revenue', ascending=False)

#chairman's first name rank
def getFirstNameWordCloud(datas):
    """Draw a word cloud of chairman surnames (the first character of the
    'chairman' column) across *datas* and show it with matplotlib.

    Improvements: surname frequencies are computed with a vectorized pandas
    expression instead of a row-by-row loop that assumed a clean 0..n-1
    RangeIndex, and the typo in the plot title ("chariman") is fixed.
    """
    # first character of each chairman name -> frequency (NaN names are dropped)
    nameCount = datas['chairman'].str[:1].value_counts()
    # a CJK-capable font is required to render Chinese glyphs
    font = r'C:\Windows\Fonts\msyh.ttc'
    namewc = WordCloud(font_path=font, scale=5, max_words=1000,
                       colormap='rainbow').generate(" 姓".join(nameCount.keys()))
    plt.figure(figsize=(12, 15))
    plt.imshow(namewc, interpolation='bilinear')
    plt.axis("off")
    plt.title("China Top 500 chairman's first name")
    plt.show()

# Render the chairman surname word cloud for the merged 2018 company table.
getFirstNameWordCloud(chinaCompany2018)

###########################################################
#get the province city info from address
def getPositionByCity(companys):
    """Extract a city name from the row's 'address' field.

    Addresses containing '市' are split on the province / '中国' prefix /
    autonomous-region marker to isolate the city; addresses without '市'
    map to '香港' when they mention Hong Kong, otherwise to '其他'.
    """
    addr = companys['address']
    if '市' not in addr:
        return "香港" if '香港' in addr else "其他"
    if '省' in addr:
        return addr.split('省')[1].split('市')[0]
    if addr[0:2] == '中国':
        return addr.split('中国')[1].split('市')[0]
    if '自治区' in addr:
        return addr.split('自治区')[1].split('市')[0]
    return addr.split('市')[0]

def getPositionByProvince(companys):
    """Extract a province-level region from the row's 'address' field.

    Provinces ('…省') are returned without the suffix; the four
    municipalities and Hong Kong are matched by name; autonomous regions
    keep their '自治区' suffix; anything else maps to '其他'.
    """
    addr = companys['address']
    if '省' in addr:
        return addr.split('省')[0]
    for region in ('北京', '上海', '香港', '重庆', '天津'):
        if region in addr:
            return region
    if '自治区' in addr:
        return addr.split('自治区')[0] + '自治区'
    return '其他'

def getLocation(name, id, position, province, headers):
    """Geocode *position* (a city name) with the Baidu geocoder API, persist
    lng/lat on the company document via savelocation(), and return (lng, lat).

    Raises AttributeError when the API response carries no 'result' (bad key,
    quota exceeded, unknown address) — same behaviour as the original.
    Cleanup: the unused ``callback`` local was removed.
    """
    print(name)
    bdurl = 'http://api.map.baidu.com/geocoder/v2/?address='
    output = 'json'
    # NOTE(review): hard-coded API key — move to configuration/secret storage.
    ak = 'rHMG1pFGhG0QZGY8ar9qiQE76WIbl5Dm'
    url = bdurl + "%s" % (urllib.parse.quote(position)) + "&output=" + output + "&ak=" + ak
    print(url)
    http = urllib3.PoolManager()
    page = http.request("GET", url, headers=headers)
    print(page.data)
    location = json.loads(page.data).get('result').get('location')
    lng = location.get('lng')
    lat = location.get('lat')
    savelocation(id, province, position, lng, lat)
    return lng, lat

def savelocation(id, province, city, lng, lat):
    """Store the geocoded coordinates plus province/city on the company
    document whose _id is *id* in fortunedb.ChinaCompany2018.

    Fix: uses update_one — Collection.update has been deprecated since
    pymongo 3.0 and was removed in pymongo 4.x.
    """
    loccol = getConn('fortunedb', 'ChinaCompany2018')
    loccol.update_one({'_id': id},
                      {'$set': {'lng': lng, 'lat': lat, 'province': province, 'city': city}})


# Derive city/province columns from each company's address.
chinaCompany2018['city'] = chinaCompany2018.apply(getPositionByCity, axis=1)
chinaCompany2018['province'] = chinaCompany2018.apply(getPositionByProvince, axis=1)

# BUG FIX: the original wrote
#     df['lng'], df['lat'] = df.apply(...)
# which tuple-unpacks the resulting Series of (lng, lat) pairs into two names
# and raises ValueError for any frame that does not have exactly two rows.
# Collect the pairs first, then assign each coordinate column.
coords = chinaCompany2018.apply(
    lambda company: getLocation(company['name'], company['_id_x'],
                                company['city'], company['province'], headers),
    axis=1)
chinaCompany2018['lng'] = [pair[0] for pair in coords]
chinaCompany2018['lat'] = [pair[1] for pair in coords]

# Build [lat, lng, weight] triples and render them as a folium heat map.
data = [[row['lat'], row['lng'], 1] for _, row in chinaCompany2018.iterrows()]

map_osm = folium.Map(location=[35, 110], zoom_start=5)
HeatMap(data).add_to(map_osm)
file_path = r"Spider\heatMap.html"
map_osm.save(file_path)
webbrowser.open(file_path)