# from urllib import request
# coding=UTF-8
from __future__ import print_function
import urllib2
import re
import json
import codecs
import time
import csv
from bs4 import BeautifulSoup
'''
dict = {'date':'2018-01-01',
        'content':['火箭vs勇士','湖人vs骑士'...],
        'url':[url1,url2,...]}

'''
def get_one_page(url):
    """Fetch *url* and return the file-like HTTP response object.

    Bug fix: the original built the ``headers`` dict but never attached it
    to the request, so the site always saw the default urllib2 User-Agent.
    The headers are now passed through ``urllib2.Request`` so the
    desktop-browser User-Agent is actually sent.

    :param url: absolute URL to fetch.
    :return: response object from ``urllib2.urlopen`` (caller reads/decodes).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'
    }
    request = urllib2.Request(url, headers=headers)
    return urllib2.urlopen(request)

def parse_match(html, out_path='111.json'):
    """Parse a month's game-list page and dump the schedule to a JSON file.

    Each non-empty ``div.cheight`` block on the page holds one date plus its
    games; the result is a list of dicts shaped like::

        {'date': u'2018-01-01', 'content': [u'match title', ...],
         'url': [u'http://stat-nba.com/game1/....html', ...]}

    :param html: page markup (string or file-like, as accepted by BeautifulSoup).
    :param out_path: output file path; defaults to '111.json' to stay
        backward-compatible with the original hard-coded name.
    :return: None (side effect: writes UTF-8 JSON to *out_path*).
    """
    soup = BeautifulSoup(html)
    urlprefix = u'http://stat-nba.com/'
    # Hoisted out of the loop: same pattern for every date block.
    href_pat = re.compile('game1/.*?html', re.S)
    match_date = []
    for div in soup.find_all('div', 'cheight'):
        # Skip empty spacer divs.
        if div.text.strip() == '':
            continue
        date = div.find('font', 'cheightdate')
        matchs = []
        urls = []
        for match in div.find_all('a', href=href_pat):
            matchs.append(unicode(match.text))
            # hrefs on the page are relative; prefix with the site root.
            urls.append(urlprefix + unicode(match['href']))
        match_date.append({u'date': unicode(date.text),
                           u'content': matchs,
                           u'url': urls})
    # try/finally so the handle is released even if serialization fails.
    out = codecs.open(out_path, mode='w', encoding='utf-8')
    try:
        # ``encoding=`` kwarg is Python 2 json; file is opened as UTF-8 text.
        out.write(json.dumps(match_date, ensure_ascii=False, encoding='utf-8'))
    finally:
        out.close()



def main(year, month):
    """Download and parse the simple game-list page for one year/month."""
    base = 'http://stat-nba.com/gameList_simple-'
    page_url = base + str(year) + '-' + str(month) + '.html'
    parse_match(get_one_page(page_url))


def getteamInfo():
    """Scrape the team list page and write one line per team to .\\team\\team.

    Output columns (space-separated): Chinese name, English slug, conference,
    division, logo URL, team-page URL.

    Bug fix: the header line was written without a trailing newline, so the
    first team row was fused onto the header line in the output file.

    Layout assumptions (from the page structure the code walks):
    the table has one <td> per division; index1 counts teams overall
    (first 15 are Eastern conference), index2 counts divisions and indexes
    into ``area`` starting at the third entry. NOTE(review): this depends on
    the site keeping that ordering — verify if the page layout changes.
    """
    urlprefix = 'http://www.stat-nba.com'
    url = urlprefix + '/teamList.php'
    html = urllib2.urlopen(url)
    page = BeautifulSoup(html)
    table = page.find('table', 'stat_box')
    area = ['东部', '西部', '东南区', '中区', '大西洋区', '太平洋区', '西北区', '西南区']
    index1 = 1
    index2 = 1
    # try/finally so the output handle is closed even if parsing raises.
    out = codecs.open(r'.\team\team', mode='w', encoding='utf-8')
    try:
        out.write('球队名称 英文名 半区 具体区域 图标 主页位置' + '\n')
        for td in table.find_all('td'):
            for div in td.find_all('div', 'team'):
                img = div.find('img')  # hoisted: used for both slug and logo URL
                team = []
                team.append(div.find('div').text.strip())
                # English slug = image filename without extension.
                team.append(img['src'].split('/')[-1].split('.')[0])
                if index1 <= 15:
                    team.append(area[0])
                else:
                    team.append(area[1])
                team.append(area[index2 + 1])
                team.append(urlprefix + img['src'])
                team.append(urlprefix + div.find('a')['href'])
                out.write(' '.join(team) + '\n')
                index1 += 1
            index2 += 1
    finally:
        out.close()



if __name__ == '__main__':
    # Convert the space-separated team2player file into a CSV.
    # Bug fix: the original wrote ``writerows(data)`` — only the LAST parsed
    # tuple, with each string exploded into a row of characters — instead of
    # the accumulated ``rows`` list. Also: files are now closed via
    # try/finally, ``file`` no longer shadows the builtin, and each line is
    # split once instead of three times.
    f = open('./knowledgeGraph/team2player')
    try:
        header = f.readline().strip().split(' ')
        rows = []
        for l in f:
            parts = l.strip().split(' ')
            rows.append((parts[0], parts[1], parts[2]))
    finally:
        f.close()
    out = open('team2player.csv', mode='a')
    try:
        f_csv = csv.writer(out)
        f_csv.writerow(header)
        f_csv.writerows(rows)
    finally:
        out.close()

    # for j in range(2015,2018):
    #     for i in range(1, 13):
    #         if i < 10:
    #             i = '0' + str(i)
    #         else:
    #             i = str(i)
    #         main(year=str(j),month=str(i))
    #         time.sleep(1)
    # main(year='2018',month='01')
    # for i in range(5,-1,-1):
    #     msg='倒计时'+str(i)+'秒'
    #     print(msg,end="")
    #     print('\b' * len(msg) * 2, end="",flush=True)
    #     time.sleep(1)