import csv
import urllib
import time
import socket
from urllib.request import urlopen
from bs4 import BeautifulSoup

# Main entry point.
def main():
    """Scrape the profile tables and save them to 'Profile Auto Check.csv'.

    Opens the output file, builds a csv writer, and delegates the actual
    scraping/row-writing to grabstockperformance().
    """
    # `with` guarantees the file is closed (and rows flushed) even if the
    # scrape raises — the original left the handle open on any exception.
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open("Profile Auto Check.csv", 'wt', newline='', encoding='utf-8') as savefile:
        writer = csv.writer(savefile)

        # Scrape the profile page tables and write each row to the CSV file.
        # (The header row is written inside grabstockperformance.)
        grabstockperformance(writer)

def cancelStr(text):
    """Return *text* with every newline and space character removed.

    Used to normalize the whitespace-padded cell text that BeautifulSoup
    extracts from the scraped HTML tables.
    """
    # Renamed the parameter: the original shadowed the builtin `str`.
    return text.replace("\n", "").replace(" ", "")


# Scrape the skill/priority/threshold tables from the profile page and
# write them to the CSV file. Parameter: a csv writer for the output file.
def grabstockperformance(csvwriter):
    """Fetch the profile page, parse its HTML tables, and write one CSV row
    (skill, priority, threshold) per table row through *csvwriter*.

    A header row ["skill", "priority", "threshold"] is written first.
    Raises urllib.error.URLError if the page cannot be fetched.
    """
    # Profile page URL to scrape.
    path = 'https://cscentral-eu.amazon.com/call-center-manager/profiles/amzn1.cs-gacd.profile.A1YJVF3ZUK33U2'

    # BUG FIX: the original passed this URL to open(), which only reads local
    # files and would raise FileNotFoundError. Use urlopen (imported at the
    # top of the file but previously unused); `with` closes the response.
    # NOTE(review): assumes the page is UTF-8 encoded — confirm against the
    # server's Content-Type header.
    with urlopen(path) as response:
        html = response.read().decode('utf-8')

    # Parse the page with the stdlib-backed html.parser backend.
    htmlsoup = BeautifulSoup(html, "html.parser")

    # Header row for the output CSV.
    csvwriter.writerow(["skill", "priority", "threshold"])

    # Walk every table body row and emit its first three cells, with
    # whitespace stripped from each cell's text.
    for table in htmlsoup.findAll("table"):
        for tbody in table.findAll("tbody"):
            for tr in tbody.findAll("tr"):
                tds = tr.findAll("td")
                if len(tds) < 3:
                    # Skip malformed/short rows instead of raising IndexError.
                    continue
                csvwriter.writerow([cancelStr(td.text) for td in tds[:3]])

main()