import requests
from bs4 import BeautifulSoup
import random
import time
import json
from django.http import JsonResponse
from common.error import *
from django.conf import settings
import os
import uuid
from pymongo import MongoClient

# coding:utf-8  (NOTE(review): inert here — an encoding declaration only takes
# effect on line 1 or 2 of the file)
# Base URL of hupu.com's NBA game-center pages.
url_root_path = 'https://nba.hupu.com/games/'
# URL suffix per data kind; indexed by j in get_hupu_data (0=recap, 1=boxscore, 2=playbyplay).
change_path = ["recap/", "boxscore/", "playbyplay/"]
# Output-file name prefix per data kind, parallel to change_path.
now_title = ["recap_", "boxscore_", "playbyplay_"]
user_agent = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0',
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)"
]  # pool of spoofed User-Agent headers; one is picked at random per request to evade anti-scraping

client = MongoClient('mongodb://127.0.0.1:27017/')  # local MongoDB instance
db = client['match']
collection1 = db['matchlist']  # one document per date: that day's match list
collection2 = db['basicmatchdetail']  # one document per game: parsed box-score stats


def _fetch_soup(url):
    """GET *url* with a random spoofed User-Agent and return the parsed page."""
    headers = {'User-Agent': random.choice(user_agent)}  # random UA to evade anti-scraping
    resp = requests.get(url, headers=headers, timeout=30)
    resp.raise_for_status()
    resp.encoding = 'utf-8'
    return BeautifulSoup(resp.text, 'html.parser')


def _prepare_output_path(match, uu_id, faddress, kind, news_id):
    """Create the per-game output directory under *faddress* and return the
    relative file path (which is what gets stored back on *match*)."""
    rel_dir = ('/file/basketball/' + match['date'] + '/' + uu_id + '/' +
               match['team'] + '/' + kind + '/')
    os.makedirs(faddress + rel_dir, exist_ok=True)
    return rel_dir + kind + '_' + news_id


def _row_text(row):
    """Flatten a <tr> element into one space-joined line of its non-empty cells."""
    return str(' '.join(list(filter(None, row.get_text().split('\n')))))


def _write_boxscore_rows(file_handle, rows):
    """Write one team's box-score rows to the open file.

    The '替补' (bench) divider row is dropped and the '首发' (starters)
    header row gets a '位置' (position) column label inserted, matching the
    original page layout.
    """
    for row in rows:
        line = _row_text(row)
        if line[0:2] == "替补":
            continue
        elif line[0:2] == "首发":
            cells = list(line)
            cells.insert(3, "位置  ")  # insert the position column header
            file_handle.write('\n' + ''.join(cells))
        else:
            file_handle.write('\n' + line)


def _collect_team_rows(rows, total_line=None):
    """Parse one team's table rows into per-player stat lists via zhengliren().

    Returns (details, total_line) where *total_line* is the raw '统计'
    (team total) row if one was seen, else the value passed in (the caller
    passes the away team's total as a fallback, matching the original
    behaviour where the variable persisted between loops).
    Row-level parse errors are deliberately swallowed (best effort).
    """
    details = []
    for row in rows:
        try:
            line = _row_text(row)
            head = line[0:2]
            if head == "替补":  # bench divider row — skip
                continue
            elif head == "首发":  # starters header row — nothing to keep
                pass
            elif head == "统计":  # team-total row, formatted later by zhenglidui
                total_line = line
            elif head in "命中率":  # shooting-percentage footer row (substring check, as before)
                pass
            else:
                details.append(zhengliren(line))
        except Exception:
            pass  # best effort: skip rows that do not parse
    return details, total_line


def _store_basic_detail(bs, away_rows, home_rows, news_id):
    """Assemble the per-game stats document and upsert it into collection2.

    The document is keyed by a stat-nba.com style URL (although the data is
    actually from hupu) so downstream lookups need no change.
    """
    doc_url = 'http://stat-nba.com/game/' + news_id + '.html'
    doc = {'url': doc_url, 'team1Info': '', 'team2Info': '',
           'team1Home': '', 'team2Home': '', 'team1Score': '',
           'team2Score': '', 'team1Detail': '', 'team1Summary': '',
           'team2Detail': '', 'team2Summary': ''}
    away_scores = bs.select('div.yuece_num_a> table.itinerary_table >tbody> tr.away_score > td')
    home_scores = bs.select('div.yuece_num_a> table.itinerary_table >tbody> tr.home_score > td')
    # int() replaces the original eval(): the cells are plain numerals and
    # eval() on scraped HTML is a code-injection risk.
    doc.update({'team1Info': [' ', away_scores[0].text, ' '],
                'team2Info': [' ', home_scores[0].text, ' '],
                'team1Home': [str(int(away_scores[-1].text)), '客场'],
                'team2Home': [str(int(home_scores[-1].text)), '主场']})
    doc['team1Score'] = [str(int(td.text)) for td in away_scores[1:-1]]
    doc['team2Score'] = [str(int(td.text)) for td in home_scores[1:-1]]
    team1_detail, total1 = _collect_team_rows(away_rows)
    doc['team1Detail'] = team1_detail
    doc['team1Summary'] = [zhenglidui(total1, len(team1_detail))]
    team2_detail, total2 = _collect_team_rows(home_rows, total1)
    doc['team2Detail'] = team2_detail
    doc['team2Summary'] = [zhenglidui(total2, len(team2_detail))]
    if collection2.count_documents({'url': str(doc['url'])}) == 0:
        collection2.insert_one(doc)
        print('比赛%s基础统计数据爬取完成' % doc_url)
    else:
        collection2.replace_one({'url': doc['url']}, doc)
        print('比赛%s基础统计数据更新入库' % doc_url)


def get_hupu_data(match, type, faddress):
    """Scrape one game from hupu.com and write files and/or MongoDB records.

    match    -- dict with at least 'hupu_id', 'date' and 'team'; updated in
                place with the relative path of any file written.
    type     -- one of 'playbyplay', 'boxscore', 'recap',
                'boxscoremongodb' (box-score stats to MongoDB only, no file).
                (Name shadows the builtin but is kept for caller compatibility.)
    faddress -- filesystem root that the relative paths are created under.
    Returns the (possibly updated) *match* dict.
    """
    news_id = match['hupu_id']
    uu_id = str(uuid.uuid1())  # unique per-call directory component
    if type == 'playbyplay':
        bs = _fetch_soup(url_root_path + change_path[2] + news_id)
        ptp_table = bs.select("div.table_list_live.playbyplay_td.table_overflow > table > tr")
        ptp_intro = bs.select("div.table_list_live.playbyplay_td > table > tr.title.bg_a")[0].get_text().split('\n')
        ptp_intro = list(filter(None, ptp_intro))
        path = _prepare_output_path(match, uu_id, faddress, 'playbyplay', news_id)
        match.update({'playbyplay': path})
        with open(faddress + path, 'w+', encoding='utf-8') as file_handle:
            file_handle.write(','.join(ptp_intro))  # header line
            for row in ptp_table:  # one comma-joined line per play-by-play row
                file_handle.write('\n' + str(','.join(list(filter(None, row.get_text().split('\n'))))))
    elif type == 'boxscore':
        bs = _fetch_soup(url_root_path + change_path[1] + news_id)
        away_rows = bs.find('table', id='J_away_content').find_all('tr')
        home_rows = bs.find('table', id='J_home_content').find_all('tr')
        intro1 = bs.select('div.clearfix > h2')[0].get_text().split('\n')  # away-team heading
        intro2 = bs.select('div.clearfix > h2')[1].get_text().split('\n')  # home-team heading
        path = _prepare_output_path(match, uu_id, faddress, 'boxscore', news_id)
        match.update({'boxscore': path})
        with open(faddress + path, 'w+', encoding='utf-8') as file_handle:
            file_handle.write(''.join(list(filter(None, intro1))))
            _write_boxscore_rows(file_handle, away_rows)
            file_handle.write('\n')
            file_handle.write(''.join(list(filter(None, intro2))))
            _write_boxscore_rows(file_handle, home_rows)
        _store_basic_detail(bs, away_rows, home_rows, news_id)
    elif type == 'recap':
        # NOTE(review): this fetches the playbyplay page (change_path[2]),
        # not change_path[0]; the recap paragraphs are apparently scraped off
        # that page. Kept as-is — confirm against the live site.
        bs = _fetch_soup(url_root_path + change_path[2] + news_id)
        paragraphs = bs.select('div.about_fonts.clearfix > p')
        path = _prepare_output_path(match, uu_id, faddress, 'recap', news_id)
        match.update({'recap': path})
        with open(faddress + path, 'w+', encoding='utf-8') as file_handle:
            for paragraph in paragraphs:  # write each recap paragraph
                file_handle.write(''.join(list(filter(None, paragraph.get_text()))))
    elif type == 'boxscoremongodb':
        # MongoDB-only variant of 'boxscore': no file is written and *match*
        # is not updated — the stats document is upserted into collection2.
        bs = _fetch_soup(url_root_path + change_path[1] + news_id)
        away_rows = bs.find('table', id='J_away_content').find_all('tr')
        home_rows = bs.find('table', id='J_home_content').find_all('tr')
        _store_basic_detail(bs, away_rows, home_rows, news_id)
    return match


def zhengliren(table_line1):
    """Convert one player box-score line into the stat-nba column layout.

    table_line1 -- a space-separated row: name, (ignored), minutes, then
                   'made-attempted' cells for FG / 3PT / FT, rebounds,
                   assists, fouls, steals, turnovers, blocks, points.
    Returns a list of strings in the order the downstream consumer expects,
    with '1' filling the starter slot and '0%' filling true-shooting.
    """
    def shooting(pair):
        # Split a 'made-attempted' cell into [pct, made, attempted].
        # If either side is the literal "0" the pct is reported as '0%'
        # (this also avoids division by zero on '0-0').
        made, attempted = pair.split('-')
        if made == "0" or attempted == "0":
            return ['0%', made, attempted]
        return ['{:.0f}%'.format(float(made) / float(attempted) * 100), made, attempted]

    boxscore = table_line1.split(" ")
    result = [
        boxscore[0],   # player name
        "1",           # placeholder for the "starter" column
        boxscore[2],   # minutes played
    ]
    result.extend(shooting(boxscore[3]))   # field goals
    result.extend(shooting(boxscore[4]))   # three pointers
    result.extend(shooting(boxscore[5]))   # free throws
    result.append('0%')                    # placeholder: true shooting %
    result.append(boxscore[8])             # total rebounds
    result.extend(boxscore[6:8])           # offensive / defensive rebounds
    result.append(boxscore[9])             # assists
    result.append(boxscore[11])            # steals
    result.append(boxscore[13])            # blocks
    result.append(boxscore[12])            # turnovers
    result.append(boxscore[10])            # fouls
    result.append(boxscore[14])            # points
    return result


def zhenglidui(table_line1, renshu):
    """Convert the team-total ('统计') box-score line into the stat-nba layout.

    table_line1 -- the space-separated team-total row (same stat columns as a
                   player row but without the per-player name/minutes slots).
    renshu      -- number of player rows, rendered as e.g. '12人' in place of
                   a player name.
    Returns a list of strings parallel to zhengliren()'s output.
    """
    def shooting(pair):
        # Split a 'made-attempted' cell into [pct, made, attempted].
        # If either side is the literal "0" the pct is reported as '0%'
        # (this also avoids division by zero on '0-0').
        made, attempted = pair.split('-')
        if made == "0" or attempted == "0":
            return ['0%', made, attempted]
        return ['{:.0f}%'.format(float(made) / float(attempted) * 100), made, attempted]

    boxscore = table_line1.split(" ")
    boxscore.insert(2, '')  # pad so the indices line up with player rows
    result = [str(renshu) + "人"]          # player count in place of a name
    result.extend(shooting(boxscore[3]))   # field goals
    result.extend(shooting(boxscore[4]))   # three pointers
    result.extend(shooting(boxscore[5]))   # free throws
    result.append('0%')                    # placeholder: true shooting %
    result.append(boxscore[8])             # total rebounds
    result.extend(boxscore[6:8])           # offensive / defensive rebounds
    result.append(boxscore[9])             # assists
    result.append(boxscore[11])            # steals
    result.append(boxscore[13])            # blocks
    result.append(boxscore[12])            # turnovers
    result.append(boxscore[10])            # fouls
    result.append(boxscore[14])            # points
    return result


def get_match_list(date):
    """Scrape hupu.com's schedule page for *date* ('YYYY-MM-DD').

    Returns a list of dicts, one per game, with keys: 'date', empty
    placeholder slots 'playbyplay'/'boxscore'/'recap', 'team'
    ('<away>-<home>') and 'hupu_id' (the game id from the page link).
    """
    match_list = []
    headers = {'User-Agent': random.choice(user_agent)}  # random UA to evade anti-scraping
    url = "https://nba.hupu.com/games/" + date
    html = requests.get(url, headers=headers, timeout=30)
    html.raise_for_status()
    html.encoding = 'utf-8'
    bs = BeautifulSoup(html.text, 'html.parser')
    for box in bs.select("div.gamecenter_content_l > div.list_box"):
        team = box.find_all('tr', class_='title')[1].find_all('td')
        match_info = {  # renamed from `dict` — do not shadow the builtin
            'date': date, 'playbyplay': '', 'boxscore': '', 'recap': '',
            'team': team[1].text + '-' + team[3].text,
            'hupu_id': box.select("div.border_a > div.table_choose.clearfix > a")[0]['href'].split('/')[-1],
        }
        match_list.append(match_info)
    return match_list


def get_hupu(date, type, faddress):
    """Django-facing entry point: scrape every game on *date*.

    Stores the day's match list in MongoDB (collection1, upserted by date),
    writes the per-game files under *faddress* via get_hupu_data(), and
    returns a JsonResponse describing success or failure.

    date     -- 'YYYY-MM-DD' schedule date.
    type     -- data kind forwarded to get_hupu_data().
    faddress -- filesystem root for generated files.
    """
    try:
        result = []
        match_list = get_match_list(date)
        # Day-level summary document (renamed from `db`, which shadowed the
        # module-level Mongo database handle).
        day_doc = {
            'date': date,
            'content': [m['team'] for m in match_list],
            'url': ['http://stat-nba.com/game/' + m['hupu_id'] + '.html' for m in match_list],
        }
        if collection1.count_documents({'date': str(date)}) == 0:
            collection1.insert_one(day_doc)
            print('比赛%s列表入库' % (str(date)))
        else:
            collection1.replace_one({'date': day_doc['date']}, day_doc)
            print('比赛%s列表更新入库' % (str(date)))

        for match in match_list:  # generate the per-game files/records
            result.append(get_hupu_data(match, type, faddress))
            time.sleep(random.uniform(1, 3))  # throttle so the site does not blacklist us
        info = {
            'successful': True,
            'data': result,
            'code': ERROR_CODE_OPERATION_SUCCESS,
            'message': '数据获取成功'
        }
        return JsonResponse(info, json_dumps_params={'ensure_ascii': False})
    except Exception as e:
        # The failure used to be silently swallowed; log the cause so it is
        # diagnosable, but keep the same JSON error contract for callers.
        print('get_hupu(%s) failed: %r' % (date, e))
        info = {
            'successful': False,
            'data': None,
            'code': ERROR_CODE_PARTNER_ERROR,
            'message': '爬取脚本出错'
        }
        return JsonResponse(info, json_dumps_params={'ensure_ascii': False})


def get_hupu_mg(date, type, faddress):
    """Batch variant of get_hupu() for script-driven sweeps.

    Same scraping and MongoDB bookkeeping, but returns nothing and keeps
    failures non-fatal so a long date range keeps going.
    """
    try:
        match_list = get_match_list(date)
        # Day-level summary document (renamed from `db`, which shadowed the
        # module-level Mongo database handle).
        day_doc = {
            'date': date,
            'content': [m['team'] for m in match_list],
            'url': ['http://stat-nba.com/game/' + m['hupu_id'] + '.html' for m in match_list],
        }
        if collection1.count_documents({'date': str(date)}) == 0:
            collection1.insert_one(day_doc)
            print('比赛%s列表入库' % (str(date)))
        else:
            collection1.replace_one({'date': day_doc['date']}, day_doc)
            print('比赛%s列表更新入库' % (str(date)))
        for match in match_list:
            time.sleep(random.uniform(2, 4))  # throttle so the site does not blacklist us
            get_hupu_data(match, type, faddress)
    except Exception as e:
        # Was a bare `except: pass`: keep the best-effort semantics but log
        # the cause, and no longer swallow KeyboardInterrupt/SystemExit.
        print('get_hupu_mg(%s) failed: %r' % (date, e))


if __name__ == '__main__':
    import datetime

    # Sweep every day in (start, end], one batch of requests per day.
    day = datetime.datetime.strptime('2020-04-02', '%Y-%m-%d')
    last_day = datetime.datetime.strptime('2021-11-09', '%Y-%m-%d')
    one_day = datetime.timedelta(days=1)
    while day < last_day:
        time.sleep(random.uniform(3, 5))  # pause between days to stay polite
        day = day + one_day  # advance first, so the start date itself is skipped
        get_hupu_mg(day.strftime('%Y-%m-%d'), 'boxscoremongodb',
                    'C:/Users/Administrator/Desktop/e')
