# -*- coding: utf-8 -*-
# @Time : 2021/10/19 19:23
# @Author : JingTian
# @File : GraduationThesis.py
# @Software : PyCharm

from bs4 import BeautifulSoup  # 网页解析，获取数据
import re  # 正则表达式，进行文字匹配
import urllib.request
import urllib.error  # 制定URL，获取网页数据
import xlwt  # 进行Excel操作
import sqlite3  # 进行SQLite数据库操作


def run():
    """Scrape the listing page and save the rows to an Excel workbook."""
    data_list = get_data()
    # Fixed: the original path ".\\ 深圳二手车.xls" contained a stray space,
    # producing a filename that starts with a blank character.
    save_path = ".\\深圳二手车.xls"
    save_data(data_list, save_path)  # 保存数据


# Pre-compiled regexes that pull the four listing fields out of the raw HTML.
findTitle = re.compile(r'<span>(.*)</span>')  # car name
findYear = re.compile(r'<i>(.*)年</i>')  # model year (年 = year)
findMileage = re.compile(r'<i>(.*)万公里</i>')  # mileage, in units of 万公里 (10,000 km)
findPrice = re.compile(r'<i class="Total brand_col">(.*)<em>万</em></i>')  # price, in 万 (10,000 RMB)


def get_data():
    """Scrape the listing page(s) and return one [title, year, mileage, price]
    row per car.

    Returns:
        list[list[str]]: one 4-element row per scraped car. This matches the
        row shape that save_data() writes to the spreadsheet.
    """
    data_list = []
    for page in range(0, 1):  # number of pages to fetch; widen the range for more
        # NOTE(review): the paging offset (page * 48) is currently unused
        # because ask_url() fetches base_url with no argument — confirm the
        # site's paging URL scheme before wiring it through.
        html = ask_url()  # raw page source ("" on fetch failure)
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', id="container_base"):
            item = str(item)
            titles = re.findall(findTitle, item)
            years = re.findall(findYear, item)
            mileages = re.findall(findMileage, item)
            prices = re.findall(findPrice, item)
            # Fixed: the original flattened several cars' fields into one long
            # row and appended it only once, outside the loops. Build one
            # 4-field row per car instead; zip() stops at the shortest match
            # list, so a short page no longer raises IndexError.
            for row in zip(titles, years, mileages, prices):
                data_list.append(list(row))
    print(data_list)
    return data_list


def ask_url(url=None):
    """Fetch a page and return its HTML decoded as UTF-8 ("" on failure).

    Args:
        url: the page URL to fetch. Defaults to the module-level base_url so
             existing no-argument callers keep working; passing a per-page
             URL enables pagination.

    Returns:
        str: the decoded page source, or "" if the request failed.
    """
    if url is None:
        url = base_url  # backward-compatible fallback to the module global
    head = {
        # Spoofed browser User-Agent so the site serves the normal page.
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 94.0.4606.81Safari / 537.36start"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager closes the connection even if read/decode raises
        # (the original leaked the response object).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # Best-effort logging; caller receives "" and decides what to do.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html


def save_data(data_list, save_path):
    """Write scraped rows to an .xls workbook.

    Args:
        data_list: list of 4-element rows [车名, 年份, 行驶里程, 价格].
        save_path: filesystem path for the output workbook.
    """
    print("save....")
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # workbook object
    sheet = book.add_sheet('深圳二手车', cell_overwrite_ok=True)  # worksheet
    col = ('车名', '年份', '行驶里程', '价格')
    for j, header in enumerate(col):
        sheet.write(0, j, header)  # header row
    # Fixed: the original iterated a hard-coded range(0, 48) and crashed with
    # IndexError whenever fewer rows were scraped. Iterate the actual data.
    for i, data in enumerate(data_list):
        print("第%d条" % (i + 1))
        for j in range(len(col)):
            sheet.write(i + 1, j, data[j])  # data row (row 0 holds the headers)

    book.save(save_path)


if __name__ == '__main__':
    # Base listing URL (BMW used cars, Shenzhen). Read as a module-level
    # global by get_data() and ask_url().
    base_url = "https://shenzhen.taoche.com/bmw/"
    run()
