import aiohttp
import asyncio
import datetime
import re
import xlwt

# Wall-clock start time; the __main__ block subtracts it at the end to report
# total crawl duration.
start_time = datetime.datetime.now()

# Root of the flower catalogue; index pages are baseurl + 'A'..'Z' + '.html'.
baseurl = 'http://www.iyanghua.com/huahui/'


# class asyncCrawler:
#     def __init__(self):
#         loop = asyncio.get_event_loop()
#     async def fetch_all(self,urls):
#         _headers = {
#             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
#                           'Chrome/79.0.3945.13 '
#                           '0 Safari/537.36'
#         }
#         _conn = aiohttp.TCPConnector(ssl=False)
#         async with aiohttp.ClientSession(headers=_headers, connector=_conn) as session:
#             _results = await asyncio.gather(*[fetch(session, url) for url in urls])
#             await session.close()
#             return _results
#
#     async def fetch(self,session, url):
#         while True:
#             try:
#                 async with session.get(url, timeout=3) as response:
#                     html = await response.text()
#                 if response.status == 200:  # 判断请求状态码
#                     print("成功。")
#                     return html
#                 else:
#                     print("失败：" + str(response.status))
#             except:
#                 print("超时。")


# 异步获取网页数据
# 异步获取网页数据
async def fetch(session, url, max_retries=None):
    """Fetch *url* with *session*, retrying until it succeeds.

    Returns the response body as text on HTTP 200. Non-200 responses and
    timeouts/connection errors are retried. With the default
    ``max_retries=None`` it retries forever (the original behavior); pass an
    int to bound the attempts, in which case ``None`` is returned on failure.
    """
    attempts = 0
    while max_retries is None or attempts < max_retries:
        attempts += 1
        try:
            async with session.get(url, timeout=3) as response:
                html = await response.text()
            if response.status == 200:  # 判断请求状态码
                print("成功。")
                return html
            else:
                print("失败：" + str(response.status))
        except (asyncio.TimeoutError, aiohttp.ClientError):
            # Narrowed from a bare `except:`, which also swallowed
            # asyncio.CancelledError and genuine programming errors,
            # making the gather() in fetch_all un-cancellable.
            print("超时。")
    return None


async def fetch_all(urls):
    """Fetch every URL in *urls* concurrently; return bodies in input order."""
    _headers = {
        # Desktop Chrome UA. The original split "130" across string fragments
        # as 'Chrome/79.0.3945.13 ' '0 Safari/...', producing the malformed
        # token "3945.13 0" in every request.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/79.0.3945.130 Safari/537.36'
    }
    _conn = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(headers=_headers, connector=_conn) as session:
        # `async with` closes the session on exit; the original's explicit
        # `await session.close()` was redundant.
        return await asyncio.gather(*[fetch(session, url) for url in urls])


def save_data(data_list, save_path):
    """Write the crawled flower records to an .xls workbook at *save_path*.

    Row 0 holds the column headers; each record in *data_list* fills one
    subsequent row, one cell per header column.
    """
    headers = ("名称", "别名", "原产地", "花期", "日照", "温度", "土壤", "水分", "栽植", "施肥", "浇水", "温度、光照", "病虫防治", "繁殖",
               "小贴士", "图片1", "图片2", "图片3")
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('花卉', cell_overwrite_ok=True)
    for col_idx, title in enumerate(headers):
        sheet.write(0, col_idx, title)
    for row_idx, record in enumerate(data_list, start=1):
        print("写入表格：第%d条。" % row_idx)
        for col_idx in range(len(headers)):
            sheet.write(row_idx, col_idx, record[col_idx])
    book.save(save_path)


def fetch_result(tar_url_list):
    """Synchronously run fetch_all() over *tar_url_list*; return the bodies.

    Obtains the event loop itself rather than reading the module-global
    ``loop`` that the original relied on — that global is only created under
    ``__main__``, so importing this module and calling fetch_result() would
    have raised NameError.
    """
    # 异步获取
    return asyncio.get_event_loop().run_until_complete(fetch_all(tar_url_list))


def get_url(html):
    """Extract detail-page URLs from an index page's "fpic" anchors.

    The capture group is non-greedy: the original greedy ``(.*)`` would merge
    several anchors into one bogus URL whenever two "fpic" divs shared a
    single line of HTML.
    """
    get_url_re = re.compile(r'<div class="fpic"><a href="(.*?)" target="_blank">')
    return get_url_re.findall(str(html))


def get_data(html):
    """Parse one detail page into an 18-element record.

    Elements 0-14 are ``re.findall`` result lists for the text fields (kept
    as lists to preserve the original record shape); elements 15-17 are up to
    three image URLs, padded with '' when the page has fewer than three
    (the original indexed ``pic[0..2]`` unconditionally, raising IndexError
    on image-poor pages).
    """
    # The seven <em>label：</em> fields and seven tag-* sections each share a
    # pattern shape, so build them in loops instead of 14 near-identical lines.
    simple_labels = ('别名', '原产地', '花期', '日照', '温度', '土壤', '水分')
    section_tags = ('zz', 'sf', 'js', 'wdgz', 'bcfz', 'fz', 'tags')

    data = [re.findall(re.compile(r'<h1>([\u4e00-\u9fa5]+)</h1>'), html)]
    for label in simple_labels:
        data.append(re.findall(re.compile(r'<em>%s：</em>(.*)</li>' % label), html))
    for tag in section_tags:
        data.append(re.findall(
            re.compile(r'<h1 class="tag-%s"></h1><em>(.*?\n?.*?\n?.*?\n?)</em>' % tag),
            html))

    # Dots escaped: the original r'img.iyanghua.com' let '.' match any char.
    pic = re.findall(re.compile(r'(http://img\.iyanghua\.com/.*?\.jpg)'), html)
    data.extend((pic + ['', '', ''])[:3])
    return data


if __name__ == '__main__':
    # fetch_result() reads this module-global loop.
    loop = asyncio.get_event_loop()
    path = ".\\flower.xls"

    # Stage 1: the 26 index pages are named A.html .. Z.html.
    index_urls = [baseurl + chr(code) + '.html' for code in range(ord('A'), ord('Z') + 1)]
    detail_urls = []
    for page_html in fetch_result(index_urls):
        detail_urls.extend(get_url(page_html))

    # Stage 2: fetch every detail page and parse each into one record.
    data_lists = [get_data(page_html) for page_html in fetch_result(detail_urls)]
    print(data_lists)

    # Persist everything to the spreadsheet.
    save_data(data_lists, path)

    # Report record count and elapsed wall-clock time.
    print("爬取完成，获得数据：" + str(len(data_lists)) + "条。")
    end_time = datetime.datetime.now()
    print("耗时：" + str((end_time - start_time).seconds) + "秒。")
