# coding=UTF-8
from numpy import unicode

__author__ = 'Administrator'
import codecs
import json
import os
import random
import re
import socket
import time
import urllib.request as urllib2
from bs4 import BeautifulSoup

# Hard-coded local data locations (Windows paths).  ##lynn
path = 'D:\\data\\baiketriples\\baike_triples.txt'  # baike triples dump read by sumline/getSomeData  ##lynn
path_sys = 'D:\\WriteBot\\untitled\\'  # project root used to build playerInfo/textlive/... paths


def sumline(path):
    """Count and return the number of lines in the file at *path*.

    The full baike_triples dump has 65,001,293 lines.  The previous
    version computed the count but neither printed nor returned it, and
    never closed the file on error; it now returns the count (callers
    that ignored the old ``None`` return are unaffected).

    :param path: path of the text file to count.
    :return: number of lines as an int.
    """
    num = 0
    with open(path, mode='r') as file:
        for _ in file:
            num += 1
    return num

def getSomeData(path):
    """Copy the head of the file at *path* into ./data.txt (append mode).

    NOTE: the break fires after the write, so up to 10001 (not 10000)
    lines are copied when the source is long enough — kept to preserve
    the original behaviour.  Files are now opened with context managers
    so they are closed even if an I/O error occurs mid-copy.

    :param path: source file to sample.
    """
    num = 0
    with open(path, mode='r') as src, open('data.txt', mode='a') as dst:
        for line in src:
            dst.write(line)
            num += 1
            if num > 10000:
                break


# CN-DBpedia mention-to-entity API endpoint; the query keyword is appended.
url = 'http://shuyantech.com/api/cndbpedia/ment2ent?q='


def getData(url, keyword):
    """Query the mention-to-entity API and return the raw response body.

    The previous version fetched the URL and discarded the response; the
    body is now returned (callers that ignored the old ``None`` return
    are unaffected).  The keyword is URL-quoted so non-ASCII mentions
    build a valid URL.

    :param url: API prefix ending in ``?q=`` (see the module-level ``url``).
    :param keyword: mention text to look up.
    :return: response body as bytes.
    """
    from urllib.parse import quote  # local import: keyword may contain CJK text
    result = urllib2.urlopen(url + quote(keyword))
    try:
        return result.read()
    finally:
        result.close()


def getPlayers(url):
    """Scrape "EnglishName,ChineseName" anchors from the first table at *url*.

    English names are appended to ./EnName, Chinese names to ./CnName,
    one per line.

    Fixes: the explicit "html.parser" keeps parsing consistent with the
    other scrapers in this file, and anchors whose text has no comma no
    longer raise IndexError (they are skipped like empty names).
    """
    html = urllib2.urlopen(url)
    bs = BeautifulSoup(html, "html.parser")
    table = bs.table
    # NOTE(review): r'\\player\.*' matches a literal backslash before
    # "player"; presumably r'player.*' was intended — confirm against the
    # live page before changing, so the original pattern is kept.
    players = table.find_all('a', href=re.compile(r'\\player\.*'))
    file1 = codecs.open('EnName', mode='a', encoding='utf-8')
    file2 = codecs.open('CnName', mode='a', encoding='utf-8')
    for p in players:
        content = p.text.split(',')
        # Skip anchors without a "english,chinese" pair (the old code
        # raised IndexError here when the comma was missing).
        if len(content) < 2 or len(content[1]) == 0:
            continue
        file1.write(content[0] + '\n')
        file2.write(content[1] + '\n')
    file1.close()
    file2.close()


def getTeam():
    """Scrape one 163.com NBA team roster page and dump it to '<title>.txt'.

    Only team id 1000000001 is fetched (the loop over 30 teams is still
    commented out below).  Each table row is written as space-separated
    cell texts, with the player's homepage URL appended as a last column;
    the header row gets the literal column caption instead.
    """
    urlprefix = 'http://nba.sports.163.com/team/'
    index = 1000000001
    urlsuffix = '/structure/'
    # for tag in xrange(30):
    # index=index+1
    url = urlprefix + str(index) + urlsuffix
    html = urllib2.urlopen(url)
    page = BeautifulSoup(html)
    try:
        # Team name from the page header; fall back to the numeric id.
        title = page.find('h1', 'tb-title').text
    except(Exception):
        title = str(index)

    file = codecs.open(title + '.txt', mode='a', encoding='utf-8')
    trs = page.table.find_all('tr')
    isth = False
    for tr in trs:
        # s=''
        info = []
        l = tr.find_all('td')
        if (len(l) == 0):
            # No <td> cells: this is the header row, made of <th> cells.
            l = tr.find_all('th')
            isth = True
        for i in l:
            # s+=i.text+' '
            info.append(i.text)
        # s+='\n'
        if info[1] == '':
            continue
        if isth:
            # Header row: add a caption for the extra URL column.
            info.append(u'地址')
            isth = False
        else:
            # Data row: second cell's anchor links to the player homepage.
            info.append(r'http://nba.sports.163.com/' + l[1].a['href'])

        file.write(' '.join(info) + '\n')
    file.close()


def has_no_class_p(tag):
    """BeautifulSoup filter: True for <p> tags carrying no class attribute."""
    return tag.name == 'p' and not tag.has_attr('class')


def has_no_class_div(tag):
    """BeautifulSoup filter: True for <div> tags carrying no class attribute."""
    return tag.name == 'div' and not tag.has_attr('class')


def getResult():
    """Crawl hupu.com game pages: play-by-play, recap news and box scores.

    For each game id three pages are fetched:
      news        https://nba.hupu.com/games/recap/<id>
      text live   https://nba.hupu.com/games/playbyplay/<id>
      player data https://nba.hupu.com/games/boxscore/<id>
    Output is appended under .\textlive, .\news and .\playerData.

    Fixes over the previous revision:
    * ``range(156928 - 156934)`` was ``range(-6)`` — an empty loop, so
      nothing was ever crawled; it now iterates the intended id range.
    * ``m.text.strip != ''`` compared the bound method (always truthy);
      the method is now actually called, dropping empty paragraphs.
    * ``e.message`` does not exist on Python 3 exceptions.
    """
    page_types = ['playbyplay/', 'recap/', 'boxscore/']
    urlprefix = 'https://nba.hupu.com/games/'
    for i in range(156928, 156934):
        try:
            page1 = BeautifulSoup(urllib2.urlopen(urlprefix + page_types[0] + str(i)), "html.parser")
            page2 = BeautifulSoup(urllib2.urlopen(urlprefix + page_types[1] + str(i)), "html.parser")
            page3 = BeautifulSoup(urllib2.urlopen(urlprefix + page_types[2] + str(i)), "html.parser")

            # Skip games whose live table is missing or empty.
            div1 = page1.find('div', 'table_overflow')
            try:
                if len(div1.table.find_all('tr')) == 0:
                    continue
            except Exception:
                continue
            file1 = codecs.open(r'.\textlive\playbyplay' + '_' + str(i), mode='a', encoding='utf-8')
            file2 = codecs.open(r'.\news\recap' + '_' + str(i), mode='a', encoding='utf-8')
            file3 = codecs.open(r'.\playerData\boxscore' + '_' + str(i), mode='a', encoding='utf-8')

            # Play-by-play: header cells, then one CSV line per table row.
            head = page1.find('div', 'table_list_live')
            h = [td.text.strip() for td in head.find_all('td')]
            file1.write(','.join(h) + '\n')
            for tr in page1.find('div', 'table_overflow').find_all('tr'):
                line = [td.text.strip() for td in tr.find_all('td')]
                file1.write(','.join(line) + '\n')

            # Recap news: info line, then title / time / image / body text.
            gameInfo = [p.text.strip() for p in page2.find('div', 'about_fonts').find_all('p')]
            file2.write(' '.join(gameInfo) + '\n')
            content = page2.find('div', 'news_box')
            file2.write(content.find('h2').text.strip() + '\n')
            file2.write(content.find('div', 'time').text.strip() + '\n')
            try:
                file2.write(u'图片:')
                file2.write(str(content.find('img')['src']))
                file2.write('\n')
            except Exception:
                file2.write(u'图片:无\n')
            news = [m.text.strip() for m in content.find_all(has_no_class_p)
                    if m.text.strip() != '']
            file2.write('###'.join(news) + '\n')

            # Box score: per-team rows, skipping the bench separator row.
            for tll in page3.find_all('div', 'table_list_live'):
                file3.write(tll.find('h2').text.strip() + '\n')
                trs = tll.find_all('tr')
                column = [td.text.strip() for td in trs[0].find_all('td')]
                column.insert(1, u'位置')
                file3.write(' '.join(column) + '\n')
                for tr in trs[1:]:
                    tds = tr.find_all('td')
                    if tds[0].text == u'替补':
                        continue
                    file3.write(' '.join(td.text.strip() for td in tds) + '\n')
            file1.close()
            file2.close()
            file3.close()
        except Exception as e:
            print(e)
            time.sleep(1)


# lynn
def getdatabyid(index):
    '''
    根据id来获取数据
    :param index:场次编号
    :return:
    '''

    type = ['playbyplay/', 'recap/', 'boxscore/']
    urlprefix = 'https://nba.hupu.com/games/'
    try:
        # print('dasfdadfasd')
        page1 = BeautifulSoup(urllib2.urlopen(urlprefix + type[0] + str(index)), "html.parser")
        page2 = BeautifulSoup(urllib2.urlopen(urlprefix + type[1] + str(index)), "html.parser")
        page3 = BeautifulSoup(urllib2.urlopen(urlprefix + type[2] + str(index)), "html.parser")
        # index += 1

        # 爬取对应的文字直播数据
        div1 = page1.find('div', 'table_overflow')
        try:
            if len(div1.table.find_all('tr')) == 0:
                return
        except Exception:
            return

        # 命名有问题！待修改lynn（上面INDEX为什么要加一啊。。
        file1 = codecs.open(r'.\textlive\playbyplay' + '_' + str(index), mode='a', encoding='utf-8')
        file2 = codecs.open(r'.\news\recap' + '_' + str(index), mode='a', encoding='utf-8')
        file3 = codecs.open(r'.\playerData\boxscore' + '_' + str(index), mode='a', encoding='utf-8')
        head = page1.find('div', 'table_list_live')
        h = []
        for td in head.find_all('td'):
            h.append(td.text.strip())
        file1.write(','.join(h) + '\n')
        for tr in page1.find('div', 'table_overflow').find_all('tr'):
            line = []
            for td in tr.find_all('td'):
                line.append(td.text.strip())
            file1.write(','.join(line) + '\n')

        # TODO
        # 爬取对应的新闻部分需要修改更新
        gameInfo = []
        for p in page2.find('div', 'about_fonts').find_all('p'):
            gameInfo.append(p.text.strip())
        file2.write(' '.join(gameInfo) + '\n')

        try:
            content = page2.find('div', 'news_box')
            title = content.find('h2').text.strip()
            file2.write(title + '\n')
            times = content.find('div', 'time').text.strip()
            file2.write(times + '\n')
            try:
                file2.write(u'图片:')
                file2.write(unicode(content.find('img')['src']))
                file2.write('\n')
            except Exception:
                file2.write(u'图片:无\n')
            msg = content.find_all(has_no_class_p)
            news = []
            for m in msg:
                if m.text.strip != '':
                    news.append(m.text.strip())
            file2.write('###'.join(news) + '\n')
        except Exception:
            print("No report found")

        # 爬取对应比赛的球员数据
        table_list_live = page3.find_all('div', 'table_list_live')
        for tll in table_list_live:
            team = tll.find('h2').text.strip()
            file3.write(team + '\n')
            trs = tll.find_all('tr')
            column = []
            for td in trs[0].find_all('td'):
                column.append(td.text.strip())
            column.insert(1, u'位置')
            file3.write(' '.join(column) + '\n')
            for tr in trs[1:]:
                tds = tr.find_all('td')
                if tds[0].text == u'替补':
                    continue
                pdata = []
                for td in tds:
                    pdata.append(td.text.strip())
                file3.write(' '.join(pdata) + '\n')
        file1.close()
        file2.close()
        file3.close()
        # print(str(index) + '成功爬取')
        # break
    except Exception as e:
        print(e.message)
        # print(str(index) + '失败')
        time.sleep(1)


def getNews(index):
    """Fetch the hupu recap page for game *index* into .\news\recap_<index>.

    NOTE(review): the URL is built with ``index - 1`` while the output
    file name uses ``index`` — looks deliberate for hupu's id offset, but
    confirm before relying on it.

    Fixes: the output file is now closed via a context manager, the broken
    ``m.text.strip != ''`` (method reference vs. call) is actually invoked,
    and an empty paragraph list no longer raises IndexError.
    """
    urlprefix = 'https://nba.hupu.com/games/recap/'
    html = urllib2.urlopen(urlprefix + str(index - 1))
    page = BeautifulSoup(html, "html.parser")
    filename = r'.\news\recap' + '_' + str(index)
    with codecs.open(filename, mode='a', encoding='utf-8') as file:
        file.truncate()
        gameInfo = [p.text.strip() for p in page.find('div', 'about_fonts').find_all('p')]
        file.write(' '.join(gameInfo) + '\n')
        content = page.find('div', 'news_box')
        file.write(content.find('h2').text.strip() + '\n')
        file.write(content.find('div', 'time').text.strip() + '\n')
        try:
            file.write(u'图片:')
            file.write(str(content.find('img')['src']))
            file.write('\n')
        except Exception:
            file.write(u'图片:无\n')
        news = [m.text.strip() for m in content.find_all(has_no_class_p)
                if m.text.strip() != '']
        if not news or news[0] == '':
            # Fall back to class-less <div> bodies when <p> extraction is empty.
            news = [m.text.strip() for m in content.find_all(has_no_class_div)
                    if m.text.strip() != '']
        file.write('###'.join(news) + '\n')


def checknews():
    """Report recap files under .\news whose last line is blank (bad crawl).

    Fixes: the old code leaked one file handle per file, and raised
    IndexError on a zero-line file — an empty file is now reported too.
    """
    files = os.listdir(r'.\news')
    for f in files:
        with open('.\\news\\' + f) as fh:
            lines = fh.readlines()
        if not lines or lines[-1].strip() == '':
            print(f)


def check():
    '''
    Report game ids in [155789, 156294) that are missing from ./log.

    Each log line is expected to start with a six-digit game id.

    Fixes: on Python 3 ``range`` objects have no ``remove`` (the old code
    crashed immediately), the log file is now closed, and the missing ids
    are returned as well as printed (callers that ignored the old ``None``
    return are unaffected).

    :return: list of ids not present in the log.
    '''
    pending = list(range(155789, 156294))  # ids that should have been crawled
    with open('log') as file:
        for line in file:
            done = int(line[:6])
            if done in pending:
                pending.remove(done)
    for l in pending:
        print(str(l) + '失败了')
    return pending


def getMatch():
    '''
    Scrape the stat-nba simple game list and return the game link texts.

    The previous version built ``schedule`` and silently dropped it; the
    list is now returned (callers that ignored the old ``None`` return
    are unaffected).

    :return: list of anchor texts for game1 links.
    '''
    url = 'http://www.stat-nba.com/gameList_simple.html'
    html = urllib2.urlopen(url)
    page = BeautifulSoup(html, "html.parser")
    # NOTE(review): r'game1/,*' matches "game1/" plus zero or more commas;
    # r'game1/.*' was probably intended — confirm against the page before
    # changing, so the original pattern is kept.
    tag = page.find_all('a', href=re.compile(r'game1/,*'))
    schedule = [t.text for t in tag]
    return schedule


def getplayerComparison(player1, player2):
    # TODO: important — not implemented yet.
    '''
    Return the head-to-head record between two players.

    Placeholder: the target endpoint is
    http://www.stat-nba.com/query_playercompare.php?Player_id=1861&Oplayer_id=195
    For now an empty string is always returned.

    :param player1: first player identifier (currently unused).
    :param player2: second player identifier (currently unused).
    :return: empty string.
    '''
    return ''


def get_team_performance(team_a, team_b):
    '''
    Scrape https://nba.hupu.com/teams for both teams' season records.

    Fix: the old code joined ``readlines()`` output, which yields
    ``bytes`` lines on Python 3 and made ``''.join`` raise TypeError;
    the body is now read and decoded once.

    :param team_a: team name exactly as it appears in the page's <h2>.
    :param team_b: ditto.
    :return: (record_a, record_b) — the "(...)" text after each header.
    '''
    url1 = 'https://nba.hupu.com/teams'
    content = urllib2.urlopen(url1).read().decode('utf-8')
    team_a_performance = re.search(team_a + '</h2>\s<p>\((.*)\)', content).group(1)
    team_b_performance = re.search(team_b + '</h2>\s<p>\((.*)\)', content).group(1)

    return team_a_performance, team_b_performance


def get_team_comparison(team_a, team_b, team_a_name, team_b_name, result):
    '''
    Scrape stat-nba head-to-head games between two teams.

    Per-date score lines are appended into *result* (mutated in place:
    date -> list of "<b>队 score <a>队" strings) and a win/loss summary
    string "<a>队 <w>胜<l>负 <b>队" is returned.

    Fix: ``dict.has_key`` was removed in Python 3; membership now uses
    ``in``.  The parser is pinned to "html.parser" for consistency with
    the other scrapers in this file.

    :param team_a: stat-nba team id, e.g. 'WAS'.
    :param team_b: opponent team id, e.g. 'MIA'.
    :param team_a_name: display name for team_a.
    :param team_b_name: display name for team_b.
    :param result: dict accumulating per-date score strings.
    :return: summary string for team_a's record against team_b.
    '''
    url = 'http://www.stat-nba.com/query_team.php?QueryType=game1&Team_id={}&TOpponent_id={}&PageNum=100'.format(team_a,
                                                                                                                 team_b)
    page = urllib2.urlopen(url)
    content = BeautifulSoup(page, "html.parser")
    table = content.find('tbody')
    win = 0
    loss = 0
    for tr in table.find_all('tr'):
        tds = tr.find_all('td')
        if tds[3].text == u'胜':
            win += 1
        elif tds[3].text == u'负':
            loss += 1
        entry = team_b_name + u'队' + ' ' + tds[5].text + ' ' + team_a_name + u'队'
        if tds[2].text in result:
            result[tds[2].text].append(entry)
        else:
            result[tds[2].text] = [entry]
    return team_a_name + u'队' + ' ' + str(win) + u'胜' + str(loss) + u'负' + ' ' + team_b_name + u'队'


def get_team_comparison2(team_a, team_b, time):
    """Return both teams' pre-game records for one dated meeting.

    Scans the stat-nba head-to-head results table for the row whose date
    matches *time*, follows its detail link and extracts the two
    "赛前战绩" (pre-game record) strings.

    :param team_a: stat-nba team id, e.g. 'MIA'.
    :param team_b: opponent team id, e.g. 'BOS'.
    :param time: game date string as shown in the results table.
        (NOTE: this parameter shadows the module-level ``time`` import
        inside this function.)
    :return: (team_a_record, team_b_record) strings.
    """
    url = 'http://www.stat-nba.com/query_team.php?crtcol=date_out&order=1&QueryType=game&Team_id={}&TOpponent_id={}&PageNum=100'.format(
        team_a, team_b)
    page = urllib2.urlopen(url)
    content = BeautifulSoup(page)
    table = content.find('tbody')
    url2 = ''
    for tr in table.find_all('tr'):
        tds = tr.find_all('td')
        if tds[2].text == time:
            # Column 5 links to the game detail page; drop the leading '.'.
            postfix = tds[5].a.get_attribute_list('href')[0][1:]
            url2 = 'http://www.stat-nba.com' + postfix
            break
    # NOTE(review): if no row matches the date, url2 stays '' and the
    # urlopen below fails — confirm callers always pass a listed date.
    page2 = urllib2.urlopen(url2).read()

    page2 = page2.decode('utf-8')  # response body is bytes; regexes need str
    team_a_record = re.findall('赛前战绩 (.+)\)', page2)[0]
    team_b_record = re.findall('赛前战绩 (.+)\)', page2)[1]
    return team_a_record, team_b_record


def get_team_knowledge():
    # TODO: placeholder until team background scraping is implemented.
    """Return team background knowledge (currently always an empty string)."""
    return ''


def get_game_info():
    '''
    Crawl stat-nba game pages (ids 42855..43126) and append one line per
    game to .\knowledgeGraph\gameInfo, treating each game as an entity.

    Line format: id, team names, pre-game records, date (day shifted +1,
    matching hupu's numbering), then the eight quarter scores.

    Python 3 fixes: the HTTP body is decoded to ``str`` once (``str``
    regex patterns on ``bytes`` raise TypeError), the stray ``.decode``
    calls on already-decoded text were dropped, ``unicode`` (imported
    from numpy) became ``str``, and the bare ``except:`` was narrowed.
    '''
    url = 'http://www.stat-nba.com/game1/'
    urlpostfix = '.html'
    game_id = []
    file = codecs.open('.\knowledgeGraph\gameInfo', mode='a', encoding='utf-8')
    for i in range(42855, 43127):
        info = []
        try:
            page = urllib2.urlopen(url + str(i) + urlpostfix, timeout=10).read()
        except Exception:
            # One retry after a short pause; a second failure propagates.
            time.sleep(5)
            page = urllib2.urlopen(url + str(i) + urlpostfix, timeout=10).read()
        page = page.decode('utf-8')
        content = BeautifulSoup(page, "html.parser")
        if len(content.find_all('div')) == 0:
            continue
        date = re.search(r'\d+\-\d+\-\d+', page).group()
        d = date.split('-')
        d[-1] = str(int(d[-1]) + 1)  # shift the day to match hupu's dating
        date = '-'.join(d)
        detail = content.find('div', 'detail')
        team_a = detail.find_all('div', 'title')[0].a.text
        team_b = detail.find_all('div', 'title')[1].a.text
        before = re.findall(r'赛前战绩 (.+)\)', page)
        score = content.find_all('td', 'number')
        info.append(str(i))
        info.append(team_a)
        info.append(team_b)
        info.append(before[0])
        info.append(before[1])
        info.append(date)
        # Four quarter scores for team_a followed by four for team_b.
        for s in score[:8]:
            info.append(s.text)
        game_id.append(i)
        file.write(' '.join(info) + '\n')
        file.flush()
    file.close()


def getplayer_id(tag, end=None):
    '''
    Crawl stat-nba player pages sequentially, recording "name:id" pairs.

    English names are appended to .\playerinfo\eplayer2id and Chinese
    names to .\playerinfo\cplayer2id, one "name:id" line each.

    :param tag: first player id to try.
    :param end: optional upper id bound; when None the crawl instead
        stops after 10 consecutive HTTP failures.
    :return: None
    '''
    index = tag
    urlprefix = 'http://www.stat-nba.com/player/'
    # cnames={}
    # enames={}
    f_time = 0  # consecutive HTTP-failure counter
    file1 = codecs.open(r'.\playerinfo\eplayer2id', mode='a', encoding='utf-8')
    file2 = codecs.open(r'.\playerinfo\cplayer2id', mode='a', encoding='utf-8')
    # header = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
    while True:
        try:
            # request = urllib2.Request(urlprefix + str(index) + '.html', headers=header)
            page = BeautifulSoup(urllib2.urlopen(urlprefix + str(index) + '.html', timeout=5))
            # The name block reads "ChineseName/EnglishName"; skip ids
            # whose page lacks both parts.
            name = page.find('div', 'name').text.split('\n')[0].split('/')
            if len(name) == 2:
                cname = name[0]
                ename = name[1]
            else:
                index += 1
                continue
            # cnames[cname]=index
            # enames[ename]=index
            file1.write(ename + ':' + str(index) + '\n')
            file2.write(cname + ':' + str(index) + '\n')
            file1.flush()
            file2.flush()
            # print(str(index) + '成功')
            index += 1
            f_time = 0  # a success resets the failure streak
            # json.dump(enames, file1, ensure_ascii=False, encoding='utf-8')
            # json.dump(cnames, file2, ensure_ascii=False, encoding='utf-8')
        except socket.timeout:
            # Timed out: pause ~5 seconds, then retry the same id.
            for i in range(5, -1, -1):
                # print(i)
                time.sleep(1)
        except urllib2.HTTPError as e:
            f_time += 1
            # print(e.message)
            # print('+++++++++++++++++++')
            # print(str(index) + '失败')
            index += 1
            if end == None:
                if f_time >= 10:
                    break
            else:
                if index >= end:
                    break

        # file1=open(r'.\playerinfo\eplayer2id.json',mode='w')
        # file2=open(r'.\playerinfo\cplayer2id.json',mode='w')
        # json.dump(enames,file1,ensure_ascii=False,encoding='utf-8')
        # json.dump(cnames,file2,ensure_ascii=False,encoding='utf-8')
    file1.close()
    file2.close()


def run_auto():
    '''
    Repeatedly re-run getplayer_id starting at id 1651.

    NOTE(review): ``record`` (a list) is passed as getplayer_id's ``end``
    parameter, which that function later compares with ``index >= end``
    — on Python 3 an int/list comparison raises TypeError.  Presumably a
    numeric bound (or ``end=None``) was intended; confirm before relying
    on this loop.

    :return: never returns normally (infinite loop).
    '''
    record = []
    while True:
        getplayer_id(1651, record)


def checkname2id():
    '''
    Audit the crawled id mapping: report duplicated or missing id numbers
    in cplayer2id, and current-roster names (CnName) with no mapping yet.

    :return: None (results are printed).
    '''
    id_file = open(path_sys + r'playerInfo\cplayer2id')  ##lynn
    roster_file = open(path_sys + r'playerInfo\CnName')  ##lynn
    roster = [entry.strip() for entry in roster_file]
    mapped_names = []
    mapped_ids = []
    missing = []
    previous = 0
    for record in id_file:
        fields = record.split(':')
        player = fields[0]
        number = int(fields[1])
        mapped_names.append(player)
        mapped_ids.append(number)
        if number == previous + 1:
            previous = number
        elif number == previous:
            print(str(number) + '重复了')
        else:
            # A jump in the sequence: everything in between is missing.
            missing.extend(range(previous + 1, number))
            previous = number
    for player in roster:
        if player not in mapped_names:
            print(player)

    print('缺失的号码：')
    print(missing)
    id_file.close()
    roster_file.close()


# def synchronize_files():
#     cur_player_c, cur_player_e, all_player_c, all_player_e, index = loadplayer()
#     file = codecs.open(r'.\playerInfo\CnName', encoding='utf-8')
#     s = u''
#     id = 0
#     for line in file:
#         if line.strip() not in all_player_c and cur_player_e[id] not in all_player_e:
#             print('**************')
#             print(line)
#             print('**************')
#         if line.strip() not in all_player_c and cur_player_e[id] in all_player_e:
#             print('##############')
#             print(line)
#             print(cur_player_e[id])
#             print(all_player_c[all_player_e.index(cur_player_e[id])])
#             print('##############')
#             s = s + all_player_c[all_player_e.index(cur_player_e[id])] + '\n'
#         if line.strip() in all_player_c and cur_player_e[id] in all_player_e:
#             s = s + line
#         id += 1
#     with codecs.open('.\playerInfo\CnName.new', mode='w', encoding='utf-8') as f:
#         f.write(s.strip())
#     file.close()


def loadplayer():
    '''
    Load the player-name tables from the playerInfo directory.

    :return: (current Chinese names, current English names,
              all Chinese names, all English names, ids) — five lists.
    '''
    def _stripped_lines(name):
        # One stripped string per line of the given playerInfo file.
        with codecs.open(path_sys + 'playerInfo\\' + name, encoding='utf-8') as fh:
            return [row.strip() for row in fh]

    cur_player_c = _stripped_lines('CnName')  # current roster, Chinese
    cur_player_e = _stripped_lines('EnName')  # current roster, English
    all_player_c = []  # every crawled player, Chinese
    id = []            # id matching each all_player_c entry
    for row in _stripped_lines('cplayer2id'):
        name_part, _, id_part = row.partition(':')
        all_player_c.append(name_part)
        id.append(id_part)
    all_player_e = []  # every crawled player, English
    with codecs.open(path_sys + 'playerInfo\eplayer2id', encoding='utf-8') as file:
        for l in file:
            all_player_e.append(l.split(':')[0])
    return cur_player_c, cur_player_e, all_player_c, all_player_e, id


def getteamInfo():
    '''
    Scrape stat-nba's team list into .\team\team (one team per line).

    Columns: name, English abbreviation (from the logo filename),
    conference, division, logo URL, homepage URL.

    Fix: ``urlopen(url)`` returns a response object, which has no
    ``.decode`` — the body must be ``read()`` first (the old code raised
    AttributeError immediately).
    '''
    urlprefix = 'http://www.stat-nba.com'
    url = urlprefix + '/teamList.php'
    html = urllib2.urlopen(url).read().decode('utf-8')  ##lynn
    page = BeautifulSoup(html, "html.parser")
    table = page.find('table', 'stat_box')
    area = [u'东部', u'西部', u'东南区', u'中区', u'大西洋区', u'太平洋区', u'西北区', u'西南区']
    index1 = 1  # running team count: the first 15 teams are Eastern conference
    index2 = 1  # current <td> column; maps to a division name in `area`
    file = codecs.open(r'.\team\team', mode='w', encoding='utf-8')
    file.write(u'球队名称 英文名 半区 具体区域 图标 主页位置\n')
    for td in table.find_all('td'):
        for div in td.find_all('div', 'team'):
            team = []
            team.append(div.find('div').text.strip())
            # English id comes from the logo filename, e.g. ".../BOS.png".
            team.append(div.find('img')['src'].split('/')[-1].split('.')[0])
            if index1 <= 15:
                team.append(area[0])
            else:
                team.append(area[1])
            team.append(area[index2 + 1])
            team.append(urlprefix + div.find('img')['src'])
            team.append(urlprefix + div.find('a')['href'])
            file.write(' '.join(team) + u'\n')
            index1 += 1
        index2 += 1
    file.close()


# 修改去掉recap与boxsocre
def loadtextlive(playbyplay_path='', recap_path='', boxscore_path='', id=None):
    '''
    加载文字直播，返回文字直播数组+比赛信息
    :return: gameInfo:比赛信息，如时间，地点等
    起始位置155788
    终止位置156293
    '''
    if id is None:
        # id = random.randint(156293, 156935)
        # path1 = path_sys + r'textlive\playbyplay_' + str(id)  # lynn 添加系统路径
        # path2 = path_sys + r'news\recap_' + str(id)  # lynn 添加系统路径
        # path3 = path_sys + r'playerData\boxscore_' + str(id)  # lynn 添加系统路径
        # while not os.path.exists(path1):
        #     id = random.randint(155788, 156293)
        #     path1 = path_sys + r'textlive\playbyplay_' + str(id)  ##lynn 添加系统路径
        #     path2 = path_sys + r'news\recap_' + str(id)  ##lynn 添加系统路径
        #     path3 = path_sys + r'playerData\boxscore_' + str(id)  ##lynn 添加系统路径
        path1 = playbyplay_path
        path2 = recap_path
        path3 = boxscore_path
    else:
        path1 = path_sys + r'textlive\playbyplay_' + str(id)  ##lynn 添加系统路径
        path2 = path_sys + r'news\recap_' + str(id)  ##lynn 添加系统路径
        path3 = path_sys + r'playerData\boxscore_' + str(id)  ##lynn 添加系统路径

    # print(id)

    file1 = open(path1, encoding='UTF-8')
    file2 = open(path2, encoding='UTF-8')
    file3 = open(path3, encoding='UTF-8')
    f2 = file2.readlines()
    gameInfo = f2[0].strip()
    # label_news = f2[-1].strip() #lynn 去掉
    file2.close()
    scripts = []
    is_host = False
    player_data_guest = []
    player_data_host = []
    for line in file1:
        scripts.append(line.strip())
    for line in file3:
        if '客队' in line:
            continue
        if '主队' in line:
            is_host = True
            continue
        if not is_host:
            if '命中率' not in line and '统计' not in line and '首发' not in line:
                player_data_guest.append(line.strip())
        else:
            if '命中率' not in line and '统计' not in line and '首发' not in line:
                player_data_host.append(line.strip())

    # return gameInfo, scripts, player_data_guest, player_data_host, label_news
    return gameInfo, scripts, player_data_guest, player_data_host


def getXinlang_one(room_id, msg_id=0, msg=[]):
    '''
    http://rapid.sports.sina.com.cn/live/api/msg/index?callback=cb_livercast_f23b83f6_2343_4ee2_8419_8ed143d954c1&room_id=sports%3A2018111711&count=30&msg_id=0&direct=-1&dpc=1
    https://rapid.sports.sina.cn/live/api/msg/index?callback=cb_fd1a147b_107e_47ac_b7e7_80c06fe17fa0&room_id=sports%3A2018111733&count=20&msg_id=250933236214654693&direct=-1&dpc=1
    :return:
    '''
    url_pre = 'http://rapid.sports.sina.com.cn/live/api/msg/index?callback=cb_livercast_f23b83f6_2343_4ee2_8419_8ed143d954c1&room_id=sports%3A'
    url_mid = '&count=30&msg_id='
    url_post = '&direct=-1&dpc=1'
    try:
        print(url_pre + room_id + url_mid + str(msg_id) + url_post)
        page = urllib2.urlopen(url_pre + room_id + url_mid + str(msg_id) + url_post, timeout=5).read()
    except Exception:
        print('请求超时，休息3秒')
        time.sleep(3)
        page = urllib2.urlopen(url_pre + room_id + url_mid + str(msg_id) + url_post, timeout=5).read()
    content = re.match('try\{cb_livercast_f23b83f6_2343_4ee2_8419_8ed143d954c1\((.*)\)\;\}catch\(e\)\{\};', page).group(
        1)
    result = json.loads(content)
    if len(result['result']['data']) == 0 and msg_id != 0:
        file = codecs.open('.\\textlive\\xinlang2\\' + str(room_id), mode='a', encoding='utf-8')
        a = 0
        for t in msg:
            file.write(t + '\n')
        print('获取成功')
        file.flush()
        file.close()
    if len(result['result']['data']) == 0:
        return
    for d in result['result']['data']:
        # print d
        m = []
        m.append(d['pub_time'])
        if d.has_key('text'):
            m.append(d['text'])
        elif d.has_key('pic'):
            m.append(d['pic'])
        else:
            m.append('')
        r_id = d['room_id']
        id = d['id']
        m.append(d['match']['phase'])
        m.append(d['match']['score1'] + '-' + d['match']['score2'])
        msg.append(u'\t'.join(m))
    getXinlang_one(room_id, msg_id=id, msg=msg)


def getXinLang():
    '''
    Sweep sina live-cast room ids over the 2018/19 season dates.

    Room ids are "<year><month><day><index>"; each date's indexes 1..50
    are probed, and hits are handed to getXinlang_one for download.

    Fixes: the HTTP body is decoded before regex matching (Python 3
    bytes/str TypeError); the duplicated fetch/parse code is factored
    into a local helper (the except path keeps the original behaviour of
    retrying once and letting a second failure propagate); and the
    ``fail >= 5`` early exit now actually fires — the old ``continue``
    on empty data skipped the check, so no date was ever abandoned.
    '''
    def _room_has_data(room_id):
        # One API probe; True when the room's first page contains messages.
        page = urllib2.urlopen(
            'http://rapid.sports.sina.com.cn/live/api/msg/index?callback=cb_livercast_f23b83f6_2343_4ee2_8419_8ed143d954c1&room_id=sports%3A{}&count=30&msg_id=0&direct=-1&dpc=1'.format(
                room_id), timeout=5).read().decode('utf-8')
        content = re.match(
            r'try\{cb_livercast_f23b83f6_2343_4ee2_8419_8ed143d954c1\((.*)\)\;\}catch\(e\)\{\};',
            page).group(1)
        result = json.loads(content)
        return len(result['result']['data']) != 0

    year = [2018, 2019]
    month = [10, 11, 12, 1]
    day = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18',
           '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31']
    for y in year:
        for m in month:
            for d in day:
                fail = 0
                for i in range(1, 51):
                    if m == 10 and int(d) < 25:
                        continue  # the season starts on 2018-10-25
                    room_id = str(y) + str(m) + d + str(i)
                    try:
                        has_data = _room_has_data(room_id)
                    except Exception:
                        print('请求超时，暂停5秒')
                        time.sleep(5)
                        has_data = _room_has_data(room_id)
                    if not has_data:
                        fail += 1
                        if fail >= 5:
                            print(str(y) + str(m) + str(d) + '无数据')
                            break
                        continue
                    getXinlang_one(room_id, msg=[])
                    print(room_id + '成功')
                    fail = 0


def get_CBA_data_id(id):
    '''
    Download one CBA live-cast log and save it as .\textlive\CBA\cba<id>.

    Fix: ``urlopen(...).read()`` returns bytes, so the old
    ``page != ''`` check compared bytes to str and was always true; the
    body is now decoded first so genuinely empty responses are skipped.

    :param id: sina live-cast log id.
    '''
    # url='http://api.sports.sina.com.cn/?p=live&s=livecast&a=livecastlog&id=8&dpc=1'
    url = 'http://api.sports.sina.com.cn/?p=live&s=livecast&a=livecastlog&id=' + str(id) + '&dpc=1'
    page = urllib2.urlopen(url, timeout=5).read().decode('utf-8')
    if page != '':
        filename = r'.\textlive\CBA' + r'\cba' + str(id)
        with codecs.open(filename, mode='w', encoding='utf-8') as file:
            file.write(page)
        print('获得' + str(id) + '成功')


def get_CBA_data():
    '''
    Download CBA live-cast logs for ids 199501 counting down through 198002.

    Each id is retried once after a 5 second pause on failure; a second
    failure propagates.
    '''
    for current_id in range(199501, 198001, -1):
        try:
            get_CBA_data_id(current_id)
        except Exception:
            time.sleep(5)
            get_CBA_data_id(current_id)


def get_corresponding_match():
    '''
    Map the dates/teams listed in ./date to sina room ids and download them.

    Each line of ./date reads "YYYY年MM月DD日 <team>"; the schedule page
    for that date is scanned for the team name, the match id is resolved
    to a live-cast room id, and getXinlang_one downloads the room.

    Python 3 fixes: ``t.decode('utf-8')`` crashed because lines read from
    a text file are already ``str``, and the room-info HTTP body is now
    decoded before applying a ``str`` regex.
    '''
    url1 = 'http://lives.sina.cn/?vt=4&date='
    # url2='https://rapid.sports.sina.cn/live/api/live/room?callback=cb_e263ecf3_f8c2_484b_968b_22f422e00f39&match_id=196617&dpc=1'
    dates = []
    team = []
    with open(r'.\date') as f1:
        for l in f1:
            parts = l.strip().split(' ')
            dates.append(parts[0].replace('年', '-').replace('月', '-').replace('日', ''))
            team.append(parts[1])
    for d, t in zip(dates, team):
        page = urllib2.urlopen(url1 + d)
        content = BeautifulSoup(page, "html.parser")
        room_id = ''
        for dd in content.find_all('dd'):
            if t in dd.h2.a.text:
                matchid = dd.div.h4.a['href'].split('=')[-1]
                u2 = 'http://rapid.sports.sina.cn/live/api/live/room?callback=cb_e263ecf3_f8c2_484b_968b_22f422e00f39&match_id={}&dpc=1'.format(
                    matchid)
                room_info = urllib2.urlopen(u2).read().decode('utf-8')
                room_id = re.findall(r'sports:([0-9]+)\"', room_info)[0]
                print(room_id)
                break
        if room_id == '':
            print(d + ' ' + t + ' 获取失败')
            continue
        getXinlang_one(room_id, msg=[])


if __name__ == '__main__':
    # Ad-hoc driver: earlier crawl/check runs are kept commented out as a
    # record of which data sets and id ranges were already processed.
    # getSomeData(path)
    # getPlayers('http://nba.sports.163.com/player/')
    # getResult()
    # getNews(155901)
    # getNews(156062)
    # checknews()
    #  getplayer_id(4253,5000)
    #  checkname2id()
    #  synchronize_files()
    #  getteamInfo()
    # get_game_info()
    # getXinlang_one('2019022432')
    # getXinLang()
    # r1,r2=get_team_comparison2('MIA','BOS','2019-04-03')
    # print r1,r2
    # get_CBA_data()
    # http://rapid.sports.sina.com.cn/live/api/msg/index?callback=cb_livercast_f23b83f6_2343_4ee2_8419_8ed143d954c1&room_id=sports%3A2019022432&count=30&msg_id=286470941790224784&direct=-1&dpc=1
    # get_corresponding_match()
    # a = 0
    # Currently: crawl one hupu game (text live, recap, box score) by id.
    getdatabyid(157383)