# -*- coding: utf-8 -*-


import scrapy
import requests
from bs4 import BeautifulSoup
from datascrapy.items import DbRowItem
from datascrapy.settings import URL
from datascrapy.settings import COOKIE
from datascrapy.settings import HEADERS
from datascrapy.settings import DATAHEADERS
from datascrapy.settings import SYSNAME
from datascrapy.settings import ALLOWED_DOMAINS
from datascrapy.settings import START_URLS
from datascrapy.util import cookieutil


class DbrowSpider(scrapy.Spider):
    """Scrape per-table row/size statistics for every primary ("主库")
    database listed on the DBMS portal page for SYSNAME.

    Note: this spider drives its own HTTP traffic through ``requests``
    (with session cookies tracked via ``cookieutil``) rather than Scrapy's
    downloader; the Scrapy ``response`` passed to ``parse`` only triggers
    the callback and is otherwise ignored.
    """

    name = 'dbrow'
    allowed_domains = ALLOWED_DOMAINS
    start_urls = START_URLS

    # Base endpoint of the AWR pages; ``nidx`` selects the tab
    # (2 = the overview page, 5 = the table-statistics page).
    _AWR_BASE = "http://dbms.itsm.cxxxxs.com/awr/page?"

    @staticmethod
    def _awr_url(db_id, db_name, nidx):
        """Build the AWR page URL for one database and tab index."""
        return (DbrowSpider._AWR_BASE + "db=" + db_id + "&type=3"
                + "&dbname=" + db_name + "&nidx=" + str(nidx))

    @staticmethod
    def _request_headers(referer):
        """Return a fresh copy of DATAHEADERS with Referer/Cookie set.

        A copy is used so the shared module-level DATAHEADERS dict is not
        mutated as a side effect of every request.
        """
        headers = dict(DATAHEADERS)
        headers["Referer"] = referer
        headers["Cookie"] = COOKIE + cookieutil.get_response_cookie()
        return headers

    @staticmethod
    def _link_params(href):
        """Extract ``(db_id, db_name, host)`` from a /getfun/default href.

        The query string is parsed positionally: parameter 0 is the
        database id, parameter 2 the database name, parameter 3 the host
        IP (positions taken from the original index-based parsing —
        confirm against the portal markup if the page layout changes).
        """
        query = str(href).split("?")[1]
        db_id = db_name = host = ""
        for pos, param in enumerate(query.split("&")):
            if pos == 0:
                db_id = param.split("=", 1)[1]
            elif pos == 2:
                db_name = param.split("=", 1)[1]
            elif pos == 3:
                host = param.split("=", 1)[1]
        return db_id, db_name, host

    @staticmethod
    def _row_item(tr, host):
        """Map the <td> cells of one table row onto a DbRowItem.

        Column order (from the original positional parsing): db name,
        table name, row count, data size, fragmentation rate; any cells
        beyond the fifth are ignored.
        """
        item = DbRowItem()
        item["sysName"] = SYSNAME
        item["ip"] = host
        fields = ("dbName", "tableName", "rowNums", "dataSize", "fragmentRate")
        for pos, td in enumerate(tr.findAll("td")):
            if pos < len(fields):
                item[fields[pos]] = str(td.getText()).replace("\n", "").strip()
        return item

    def parse(self, response):
        """Fetch the portal page, walk the database links for SYSNAME, and
        yield one DbRowItem per table row of every primary database.

        Stops entirely when the sentinel host 10.235.94.116 is reached.
        """
        portal = requests.get(URL, headers=HEADERS)
        cookieutil.set_response_cookie(portal)
        soup = BeautifulSoup(portal.text, "html.parser")
        # NOTE(review): bs4's `text=` keyword is deprecated in favour of
        # `string=`; kept as-is for compatibility with older bs4 versions.
        span = soup.find("span", text=SYSNAME)
        # Raises AttributeError if SYSNAME is absent from the page, same
        # as the original behaviour.
        a_list = span.findParent().findParent().select('a[href^="/getfun/default"]')

        for a in a_list:
            db_id, db_name, host = self._link_params(a.attrs['href'])
            # Sentinel host marks the end of the data set — stop scraping.
            if host == "10.235.94.116":
                print("数据拉取结束")
                return

            # Overview page: check whether this database is a primary.
            overview_url = self._awr_url(db_id, db_name, 2)
            overview = requests.get(
                overview_url, headers=self._request_headers(overview_url))
            cookieutil.set_response_cookie(overview)

            overview_soup = BeautifulSoup(overview.text, "html.parser")
            first_li = overview_soup.find("li", class_="first")
            # Only primary databases ("主库") are scraped for table stats.
            if first_li.find("div", class_="value").text != "主库":
                continue

            # Table-statistics page for the primary database.
            data_url = self._awr_url(db_id, db_name, 5)
            table_resp = requests.post(
                data_url, headers=self._request_headers(data_url))
            cookieutil.set_response_cookie(table_resp)

            table_soup = BeautifulSoup(table_resp.text, "html.parser")
            tbody = table_soup.find("tbody")
            for tr in tbody.findAll("tr"):
                yield self._row_item(tr, host)
