import os
import threading
import time
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit


class Spider:
    """Crawler for https://xingzhengquhua.51240.com/ that scrapes the Chinese
    administrative-division hierarchy (province -> city -> district -> town),
    plus a small helper that downloads every image found on a page.

    All scraping methods log and swallow their own exceptions (returning
    ``None`` on failure), so a single bad page does not abort a long crawl.
    """

    # Desktop-browser User-Agent so the site serves its normal HTML pages.
    headers = {"User-Agent": "Mozilla / 5.0(WindowsNT10.0;Win64;x64) AppleWebKit / "
                             "537.36(KHTML, likeGecko) Chrome / 70.0.3538.102Safari / 537.36Edge / 18.18363"}
    # Sequence number used to name downloaded image files.
    count = 0

    # Base URL that relative hrefs scraped from the pages are resolved against.
    BASE_URL = 'https://xingzhengquhua.51240.com/'

    def _fetch_soup(self, url, parser='html.parser'):
        """Fetch *url* and return it parsed as a BeautifulSoup document.

        Response bytes are decoded via UnicodeDammit trying utf-8 then gbk
        (the encodings this site uses).  The response is closed promptly.
        """
        req = urllib.request.Request(url, headers=self.headers)
        with urllib.request.urlopen(req, timeout=100) as resp:
            data = resp.read()
        dammit = UnicodeDammit(data, ['utf-8', 'gbk'])
        return BeautifulSoup(dammit.unicode_markup, parser)

    @staticmethod
    def _ext_from_url(url):
        """Return the trailing 3-character extension of *url* including the
        dot (e.g. '.jpg'), or '' if there is none.

        Fix: the original indexed ``url[len(url) - 4]`` unconditionally,
        which raises IndexError for URLs shorter than 4 characters.
        """
        if len(url) >= 4 and url[-4] == '.':
            return url[-4:]
        return ''

    def load_img(self, start_url):
        """Download every unique image referenced by an <img src> on *start_url*.

        :param start_url: page URL to scan; relative src values are resolved
            against it.
        """
        try:
            seen = []
            soup = self._fetch_soup(start_url, 'lxml')
            for image in soup.select('img'):
                try:
                    src = image['src']
                    url = urllib.parse.urljoin(start_url, src)
                    if url not in seen:
                        seen.append(url)
                        print(url)
                        self.download(url)
                except Exception as e:
                    print('============load_img1==========', e)
        except Exception as e:
            print('============load_img2==========', e)

    def download(self, url):
        """Download *url* to ./static/images/spider/<count><ext>.

        :param url: absolute URL of the resource to fetch.
        """
        try:
            # First increment creates an instance attribute shadowing the
            # class-level ``count`` — per-instance numbering, as before.
            self.count += 1
            ext = self._ext_from_url(url)
            req = urllib.request.Request(url, headers=self.headers)
            with urllib.request.urlopen(req, timeout=100) as resp:
                data = resp.read()
            # Fix: create the target directory; the original open() failed
            # outright when it did not already exist.
            os.makedirs('./static/images/spider', exist_ok=True)
            with open('./static/images/spider/' + str(self.count) + ext, 'wb') as fobj:
                fobj.write(data)
            print('download' + str(self.count) + ext)
        except Exception as e:
            print('============download==========', e)

    def get_province(self, url):
        """Scrape the national index page.

        :param url: URL of the top-level administrative-division index.
        :return: dict mapping province name -> province page URL, or None
            on error.
        """
        try:
            soup = self._fetch_soup(url)
            table = soup.select_one('table')
            province_dict = {}
            for a in table.select('a'):
                # Skip division-code links (their text appears inside the
                # href) and the nationwide link.
                # NOTE(review): ``a.text in '全国'`` is a substring test — it
                # also skips '全', '国' and empty text; probably intended
                # ``a.text == '全国'``.  Preserved as-is — confirm against
                # the live page before changing.
                if a.text in '全国' or a.text in a['href']:
                    continue
                province_dict[a.text] = urllib.parse.urljoin(self.BASE_URL, a['href'])
            return province_dict
        except Exception as e:
            print('=========get_province========', e)

    def get_city(self, url):
        """Scrape a province page.

        :param url: province page URL.
        :return: dict mapping city name -> city page URL, or None on error.
        """
        try:
            soup = self._fetch_soup(url)
            table = soup.select_one('table')
            city = {}
            for a in table.select('a'):
                # Skip code links, the nationwide link, and the breadcrumb
                # link back to the current page (its href is part of *url*).
                if a.text in a['href'] or a.text in '全国' or a['href'] in url:
                    continue
                city[a.text] = urllib.parse.urljoin(self.BASE_URL, a['href'])
            return city
        except Exception as e:
            print('==========get_city=======', e)

    def get_district(self, url):
        """Scrape a city page.

        :param url: city page URL.
        :return: dict mapping district name -> district page URL, or None
            on error.
        """
        try:
            soup = self._fetch_soup(url)
            anchors = soup.select_one('table').select('a')
            # anchors[1] is the breadcrumb link naming the enclosing
            # province; rows mentioning it are navigation, not districts.
            province = anchors[1].text
            district = {}
            for a in anchors:
                if (a.text in a['href'] or a.text in '全国'
                        or a['href'] in url or province in a.text):
                    continue
                district[a.text] = urllib.parse.urljoin(self.BASE_URL, a['href'])
            return district
        except Exception as e:
            print('==========get_district=======', e)

    def get_town(self, url):
        """Scrape a district page.

        :param url: district page URL.
        :return: list of town names, or None on error.
        """
        try:
            soup = self._fetch_soup(url)
            anchors = soup.select_one('table').select('a')
            # Breadcrumb links: anchors[1] = province, anchors[2] = city.
            province = anchors[1].text
            city = anchors[2].text
            town = []
            for a in anchors:
                if (a.text in a['href'] or a.text in '全国'
                        or a['href'] in url or province in a.text
                        or city in a.text):
                    continue
                town.append(a.text)
            return town
        except Exception as e:
            print('==========get_town=======', e)

    def get_address(self):
        """Walk province -> city -> district, collecting town names.

        :return: the accumulated dict.  NOTE(review): the aliasing
            assignments below point the city and province keys at the
            *last* district's town list, overwriting them every iteration —
            the hierarchy is effectively flattened.  Preserved as-is; a
            nested dict is probably what was intended.
        """
        address = {}
        for province, province_url in self.get_province(self.BASE_URL).items():
            for city, city_url in self.get_city(province_url).items():
                for district, district_url in self.get_district(city_url).items():
                    town = self.get_town(district_url)
                    address[district] = town
                    address[city] = address[district]
                    address[province] = address[city]
                    time.sleep(1)  # be polite to the server
                print(city)
            print(address)
            # Fix: the original opened the file in 'wb' and passed a dict to
            # write(), which raises TypeError; write the repr as UTF-8 text.
            with open('./address.txt', 'w', encoding='utf-8') as f:
                f.write(str(address))

        return address


# Script entry point: crawl the full division hierarchy when run directly.
if __name__ == '__main__':
    Spider().get_address()
