#!/usr/bin/env python
# -*- coding: utf-8 -*-

# from crawler import normalize, get_links
# import re
import random
# import logging
import requests
import urlparse
# import pymongo
# from bs4 import BeautifulSoup
# from datetime import datetime
# from pymongo import MongoClient
from utilities import get_ipproxies, make_random_useragent

# Use a randomized User-Agent per run so repeated requests look less uniform
# to lagou.com's anti-crawler checks.
UA = make_random_useragent()
# Headers mimicking an in-page AJAX request from www.lagou.com itself
# (Host/Origin/X-Requested-With), which the site appears to require before
# serving job data -- NOTE(review): required-header set inferred, confirm.
headers = {
    'User-Agent': UA,
    'Host': 'www.lagou.com',
    'Origin': 'http://www.lagou.com',
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'X-Requested-With': 'XMLHttpRequest'
}
# Candidate proxies from the utilities helper; presumably an iterable of
# (scheme, proxy_url) pairs -- TODO confirm against utilities.get_ipproxies.
proxies = get_ipproxies()
# Pick one proxy at random and wrap it in the mapping form `requests`
# expects, e.g. {'https': 'https://101.201.116.23:3128'}.
proxy = dict([random.choice(proxies)])
# def get_links(html):
#     """Return a list of links from html
#     """
#     # a regular expression to extract all links from the webpage
#     webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
#     # link_regex = re.compile('[a-zA-z]+://[^\s]*', re.IGNORECASE)
#     link_regex = re.compile(
#         '^(?:([A-Za-z]+):)?(/{0,3})([0-9.\-A-Za-z]+)(?:/([^?#]*))?$', re.IGNORECASE)
#     # list of all links from the webpage
#     links = []
#     links.extend(link for link in webpage_regex.findall(
#         html) if re.match(link_regex, link))
#     return links


# seed_url = 'https://www.lagou.com'
# with open('job.html') as f:
#     html = f.read()
# url = 'https://www.lagou.com/jobs/2360387.html'
# html = requests.get(url).content
# urls = get_links(html)

# for link in urls:
#     print normalize(seed_url, link)

# print len(urls)

# job_regex = re.compile('https://www.lagou.com/jobs/\d*?\.html')
# company_regex = re.compile('https://www.lagou.com/gongsi/\d*?\.html')
# for url in urls:
#     if re.match(job_regex, url):
#         print url
#     if re.match(company_regex, url):
#         print url


# def job_parser(html):
#     bshtml = BeautifulSoup(html, "lxml")
#     position_name = bshtml.find('div', class_='job-name')['title']
#     tags = [el.get_text().replace('/', '').strip()
#             for el in bshtml.find('dd', class_='job_request').find_all('span')]
#     salary = tags[0]
#     job_request = '\n'.join(tags[1:])
#     job_advantage = bshtml.find(
#         'dd', class_='job-advantage').p.get_text()
#     job_description = '\n'.join([el.get_text() for el in bshtml.find(
#         'dd', class_='job_bt').find_all('span')])
#     job_address = bshtml.find(
#         'dd', class_='job-address').find('div', class_='work_addr').get_text().strip().replace(' ', '').replace(u'查看地图', '').strip()
#     company = bshtml.find('dl', class_='job_company')
#     company_lghomepage = company.find('a')['href']
#     company_name = company.find('img')['alt']
#     return {
#         "positionName": position_name,
#         "salary": salary,
#         "job_request": job_request,
#         "info": {
#             "jobAdvantage": job_advantage,
#             "jobDescription": job_description
#         },
#         "address": job_address,
#         "company": {
#             "lagou_homepage": company_lghomepage,
#             "name": company_name
#         }
#     }


# class DownloadCallback(object):
#     '''符合条件的url调用相应的parser解析html页面并存储数据到Mongo db
#     '''

#     def __init__(self, url, html, client=None):
#         # if a client object is not passed
#         # then try connecting to mongodb at the default localhost port
#         self.client = MongoClient(
#             'localhost', 27017) if client is None else client
#         self.url = url
#         self.html = html
#         # create collection to store data,
#         # which is the equivalent of a table in a relational database
#         self.db = self.client.lagou
#         self.db.jobs.create_index('timestamp')
#         job_regex = re.compile('https://www.lagou.com/jobs/\d*?\.html')
#         if re.match(job_regex, self.url):
#             result = job_parser(self.html)
#             # Save value for this URL
#             record = {'result': result, 'timestamp': datetime.now()}
#             try:
#                 self.db.jobs.update({'_id': self.url}, {
#                                     '$set': record}, upsert=True)
#             except Exception as e:
#                 logging.debug(e)


# DownloadCallback(url, html)
# redirect_url = 'http://forbidden.lagou.com/forbidden/fb.html?ip=222.93.131.235'


def isforbidden(redirect_url):
    """Return True if `redirect_url` looks like lagou's anti-crawler block page.

    A blocked request gets redirected to something like
    http://forbidden.lagou.com/forbidden/fb.html?ip=..., so we test whether
    any known block-page marker appears as a path segment.

    :param redirect_url: the final URL after redirects (e.g. ``response.url``)
    :return: bool -- True when the URL path contains a block-page marker
    """
    parsed_url = urlparse.urlparse(redirect_url)
    path_segments = parsed_url.path.split('/')
    # BUG FIX: the original `if 'forbidden' or 'fb.html' or ... in segments`
    # evaluated the truthy literal 'forbidden' first and so ALWAYS returned
    # True.  Each marker must be membership-tested individually.
    markers = ('forbidden', 'fb.html', 'lagouhtml')
    return any(marker in path_segments for marker in markers)


if __name__ == '__main__':

    # proxy = {'https': 'https://101.201.116.23:3128'}
    r = requests.get('https://www.lagou.com',
                     headers=headers, proxies=proxy)
    print 'content: ', r.content
    print 'url:', r.url
    print 'ip proxy: ', proxy
    print 'history: ', r.history
    print 'status code: ', r.status_code
    print 'cokkies: ', r.cookies
    redirect_url = r.url
    print isforbidden(redirect_url)
