extern crate serde;
extern crate serde_json;
use futures::{
  future::{self, Either},
  Future,
  Stream,
};
use hyper;
use hyper::{
  Chunk, client, Client, Request, Response, Body,
};
use hyper::header::{
  ACCEPT,
  DATE,
  RETRY_AFTER
};
use hyper_tls::HttpsConnector;
use models::{Bucket, RateLimitInfo};
use tokio::timer::Delay;
use std::time::{Instant, Duration};
use std::str;
//use either;
//use either::Either;
//use std::error;
use chrono::prelude::*;

// Response header carrying the number of requests left in the rate-limit window.
const X_RATE_LIMIT_REMAINING: &str = "x-ratelimit-remaining";

// Safety margin added on top of a server-provided Retry-After duration.
const BACKOFF_MARGIN: Duration = Duration::from_secs(10);

// Newtype over the URL string so URIs can be compared and used as hash keys.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Uri(pub String);

// Maximum `count` the BitMEX bucketed-trades endpoint serves per request.
const BITMEX_MAX_REQ_SIZE: u32 = 500;

// HTTPS-capable hyper client.
// NOTE(review): despite the name, this aliases a `Client`, not a connector.
type MyHttpsConnector = hyper::Client<HttpsConnector<client::HttpConnector>>;

/// Constructs a hyper client backed by a 4-thread native-TLS connector.
///
/// # Panics
/// Panics if TLS initialization fails.
fn build_https_client() -> MyHttpsConnector {
  let connector = HttpsConnector::new(4).expect("TLS initialization failed");
  Client::builder().build::<_, hyper::Body>(connector)
}

// Placeholder error type: all failure detail is currently discarded.
type MyError = ();
// The single value of `MyError`, used in the `map_err` closures below.
const MY_ERROR: () = ();

impl Uri {
  /// Builds the bucketed-trades endpoint URL for one page of results.
  ///
  /// `page_ix` is the zero-based page index; each page holds
  /// `BITMEX_MAX_REQ_SIZE` 5-minute buckets, newest first (`reverse=true`).
  fn make_bitmex_uri(symbol: &str, page_ix: u32) -> Uri {
    Uri(
      format!(
        "https://www.bitmex.com/api/v1/trade/bucketed?\
        symbol={symbol}&\
        columns={columns}&\
        partial=false&\
        reverse=true&\
        binSize={bin_size}&\
        count={count}&\
        start={start}",
        symbol = symbol,
        columns = "close,timestamp",
        bin_size = "5m",
        count = BITMEX_MAX_REQ_SIZE,
        start = BITMEX_MAX_REQ_SIZE * page_ix
        )
      )
  }
  /// Returns the page URIs needed to fetch the `bucket_count` most recent
  /// buckets for `symbol` (zero URIs when `bucket_count` is 0).
  fn make_bitmex_uris_to_get_nlast(
    symbol: &str, bucket_count: u32
    ) -> Vec<Uri> {
    // Integer ceiling division: ceil(bucket_count / BITMEX_MAX_REQ_SIZE).
    // The previous f32-based `ceil` loses precision for counts above 2^24
    // and could return a wrong page count; integer math is exact and
    // cannot overflow.
    fn bucket_count_to_req_count(bucket_count: u32) -> u32 {
      let full_pages = bucket_count / BITMEX_MAX_REQ_SIZE;
      let has_partial_page = bucket_count % BITMEX_MAX_REQ_SIZE != 0;
      full_pages + has_partial_page as u32
    }
    let req_count = bucket_count_to_req_count(bucket_count);
    (0..req_count)
      .map(|page_ix| Uri::make_bitmex_uri(symbol, page_ix))
      .collect()
  }
}

// A pending request, identified by its target URI.
#[derive(Clone)]
struct Query { pub uri: Uri }

// A completed request: the parsed buckets plus the URI they came from.
#[derive(Clone)]
struct QueryResult { pub buckets: Vec<Bucket>, pub uri: Uri }

// Outcome of one `exec` call: a retryable query or a parsed result,
// paired with the rate-limit info read from the response headers.
type ExecResult = (QueryOrQueryResult, RateLimitInfo);

/// Performs one HTTP GET for `query` and resolves to either the parsed
/// result (on HTTP success) or the original query (so the caller can
/// retry it), paired with the response's rate-limit info.
///
/// NOTE(review): builds a fresh HTTPS client per request; sharing one
/// client would enable connection pooling.
fn exec(query: Query) -> Box<Future<Item=ExecResult, Error=MyError>> {
  // Build the request from a short-lived borrow of the URI so `query`
  // itself can be moved into the completion closure below.
  // (The old version cloned `query` twice for no effect.)
  let req = {
    let mut builder = Request::get(&query.uri.0);
    builder.header(ACCEPT, "application/json");
    builder.body(Body::empty()).unwrap()
  };
  let fut =
    build_https_client()
    .request(req)
    .inspect( |resp| println!("HTTP {}", resp.status()) )
    .map_err(|_e| MY_ERROR)
    .and_then(
      move |resp| {
        // Capture the rate-limit headers before the body is consumed.
        let rli = RateLimitInfo::from(&resp);
        QueryResponse(resp).to_q_or_qr(query)
          .map(move |q_or_qr| (q_or_qr, rli))
      });
  Box::new(fut)
}

impl RateLimitInfo {
  /// Permissive placeholder used before any response has been seen:
  /// one request allowed, no Retry-After, and an arbitrarily old timestamp
  /// so any real response's info compares as newer.
  fn default() -> RateLimitInfo {
    let timestamp = Utc.ymd(1993, 8, 26).and_hms(6, 0, 0);
    RateLimitInfo { remaining_reqs: 1, retry_after: None, timestamp }
  }
  /// Reads rate-limit state out of a response's headers.
  ///
  /// # Panics
  /// Panics if the `Date` or `x-ratelimit-remaining` header is missing or
  /// unparseable (both are expected on every BitMEX response).
  fn from<T>(resp: &Response<T>) -> RateLimitInfo {
    let headers = resp.headers();
    // Response time per the server's RFC 2822 `Date` header, in UTC.
    let timestamp = {
      let string = headers.get(DATE)
        .expect("DATE not on response.")
        .to_str()
        .unwrap();
      DateTime::parse_from_rfc2822(string)
        .unwrap()
        .with_timezone(&Utc)
    };
    let remaining_reqs = headers.get(X_RATE_LIMIT_REMAINING)
      .expect("X_RATE_LIMIT_REMAINING not on response.")
      .to_str()
      .unwrap()
      .parse()
      .unwrap();
    // Optional header; when present its value is a whole number of seconds.
    let retry_after = headers.get(RETRY_AFTER).map(|header_value| {
      let string = header_value.to_str().unwrap();
      println!("Retry-After value: {:?}", string);
      Duration::from_secs(string.parse().unwrap())
    });
    let rli = RateLimitInfo { remaining_reqs, retry_after, timestamp };
    println!("{:?}", rli);
    rli
  }
}

// `Either::A` = query still pending (retry later); `Either::B` = completed result.
type QueryOrQueryResult = Either<Query,QueryResult>;

// Wrapper adding result-conversion helpers to a hyper response.
struct QueryResponse(Response<hyper::Body>);

impl QueryResponse {
  /// True when the HTTP status is 2xx.
  fn good(&self) -> bool {
    self.0.status().is_success()
  }
  /// Converts the response into either a parsed `QueryResult` (on success)
  /// or the original `Query` (on failure, so the caller can retry it).
  ///
  /// NOTE(review): panics if a successful response body is not valid JSON
  /// for `Vec<Bucket>`; consider surfacing that as an error instead.
  fn to_q_or_qr(
    self, query: Query
    ) -> Box<Future<Item=QueryOrQueryResult,Error=MyError>> {
    // Box each branch directly instead of wrapping plain values in
    // `future::ok(..).and_then(..)` / `future::ok(..).map(..)` chains.
    if self.good() {
      let uri = query.uri;
      let fut = self.0
        .into_body()
        .concat2()
        .map_err(|_e| MY_ERROR)
        .map(move |body: Chunk| {
          let buckets: Vec<Bucket> = serde_json::from_slice(&body).unwrap();
          Either::B(QueryResult { buckets, uri })
        });
      Box::new(fut)
    } else {
      // Non-success status: hand the query back for a later retry.
      Box::new(future::ok(Either::A(query)))
    }
  }
}

/// Returns `a` when `pred_f(&a)` holds, otherwise `b`.
fn when_f<F,T>(pred_f: F, a: T, b: T) -> T
  where F: Fn(&T) -> bool {
  match pred_f(&a) {
    true => a,
    false => b,
  }
}

/// True when the value holds a completed `QueryResult` (`Either::B`).
fn is_query_result(q_or_qr: &QueryOrQueryResult) -> bool {
  if let Either::B(_) = q_or_qr {
    true
  } else {
    false
  }
}

/// Keeps only the completed results, discarding still-pending queries.
///
/// Single `filter_map` pass; the old version matched each element twice
/// (once in `filter`, once in `map`) and then flattened `Option`s with
/// `flat_map(|x| x)`.
fn filter_query_results(
  q_or_qrs: Vec<QueryOrQueryResult>
  ) -> Vec<QueryResult> {
  q_or_qrs
    .into_iter()
    .filter_map(|q_or_qr| match q_or_qr {
      Either::B(qr) => Some(qr),
      Either::A(_) => None,
    })
    .collect()
}

fn not(b:bool) -> bool { !b }

use futures::future::{loop_fn, Loop};

/// Executes all `qs`, batching requests under the server's rate limit and
/// retrying failed queries on later loop iterations, until every query has
/// produced a result. Resolves to the results plus the newest rate-limit
/// info observed.
fn throttled_download(
  qs: Vec<Query>
  ) -> Box<Future<Item=(Vec<QueryResult>,RateLimitInfo),Error=MyError>>
{
  // Accumulator for completed results; capacity = total query count.
  let empty_qrs: Vec<QueryResult> = Vec::with_capacity(qs.len());
  let qrs = empty_qrs;
  // Seed state with a permissive default (1 request allowed, no backoff).
  let default_rli = RateLimitInfo::default();
  let rli = default_rli;
  // Loop state: (pending queries, completed results, latest rate-limit info).
  let download_loop = loop_fn(
    (qs,qrs,rli),
    |(qs,qrs,rli)|
    {
      println!("#qs {} #qrs {}", qs.len(), qrs.len());
      // Clones moved into the closures below; the originals are consumed
      // by the bookkeeping stage at the end of the iteration.
      let qs_cloned = qs.clone();
      let mut cloned_qrs = qrs.clone();
      // If the server sent Retry-After, wait it out plus a safety margin;
      // otherwise the Delay fires (effectively) immediately.
      let delay = {
        let mut until = Instant::now();
        if let Some(retry_after) = rli.retry_after {
          let backoff = retry_after + BACKOFF_MARGIN;
          until += backoff;
        };
        Delay::new(until)
      };
      delay
        .map_err(|_e| MY_ERROR)
        .and_then(
          move |_|
          {
            // Issue at most `remaining_reqs` requests this round, but
            // always at least one so the loop can make progress.
            let current_batch_quota =
              when_f(|n| *n > 0, rli.remaining_reqs, 1);
            println!("batch quota: {}", current_batch_quota);
            future::join_all(
              qs_cloned
              .into_iter()
              .take(current_batch_quota as usize)
              .map(|q| exec(q))
              )
          }
          )
        // Split the (QueryOrQueryResult, RateLimitInfo) pairs into two vectors.
        .map(|exec_outputs| exec_outputs.into_iter().unzip())
        .map(
          |(q_or_qrs, rlis
           ): (Vec<QueryOrQueryResult>, Vec<RateLimitInfo>)|
          {
            (
              filter_query_results(q_or_qrs),
              reduce_rlis_to_newest(rlis)
            )
          }
          )
        .and_then(
          |(new_qrs, rli)|
          {
            let new_qrs: Vec<QueryResult> = new_qrs;
            // Append this round's results to the running accumulator.
            let qrs_plus_new_qrs: Vec<QueryResult> = {
              // TODO: maybe just doing qrs.clone() here would suffice
              cloned_qrs.extend(new_qrs.clone().into_iter());
              cloned_qrs
            };
            use std::collections::HashSet;
            // URIs answered this round, used to drop them from the pending set.
            let new_qr_uris =
              new_qrs
              .into_iter()
              .map(|qr| qr.uri)
              .collect::<HashSet<_>>();
            // Queries not answered this round stay pending for the next one.
            let qs_minus_new_qrs: Vec<Query> =
              qs.into_iter().filter(
                |q| not( new_qr_uris.contains(&q.uri) )
                )
              .collect();
            let qs = qs_minus_new_qrs;
            let qrs = qrs_plus_new_qrs;
            let all_queries_done = qs.len() == 0;
            if all_queries_done {
              future::ok(
                Loop::Break(
                  (qs, qrs, rli)
                  )
                )
            } else {
              future::ok(
                Loop::Continue(
                  (qs, qrs, rli)
                  )
                )
            }
          }
        )
        .map_err(|_e| MY_ERROR)
    }
  );
  // Drop the (empty) pending-query list from the final state.
  Box::new(
    download_loop.map( |(_qs,qrs,rli)| (qrs, rli) )
  )
}

/// Reduces a batch of rate-limit infos to the greatest one by its
/// `PartialOrd` ordering (presumably timestamp-led — confirm the field
/// order of the derive in `models`).
///
/// # Panics
/// Panics when `rlis` is empty.
fn reduce_rlis_to_newest(rlis: Vec<RateLimitInfo>) -> RateLimitInfo {
  // Fold without cloning: the old version cloned the entire vector just
  // to read its first element, then compared that element against itself.
  let mut iter = rlis.into_iter();
  let first = iter.next().expect("reduce_rlis_to_newest: empty input");
  let newest_rli = iter.fold(
    first,
    |acc, rli| if acc > rli { acc } else { rli },
    );
  println!("Newest : {:?}", newest_rli);
  newest_rli
}

/// Fetches the `bucket_count` most recent 5-minute buckets for `symbol`,
/// downloading pages under the API's rate limit, and resolves to the
/// buckets sorted ascending.
pub fn get_n_last(
  symbol: &str, bucket_count: u32
  ) -> Box<Future<Item=Vec<Bucket>,Error=MyError>> {
  let queries: Vec<Query> = {
    let uris = Uri::make_bitmex_uris_to_get_nlast(symbol, bucket_count);
    uris.into_iter().map(|uri| Query { uri }).collect()
  };
  let fut =
    throttled_download(queries)
    .map(
      |(qrs, _rli)| {
        // Pages may complete out of order, so flatten then sort.
        // (Fused the old `map(..).flat_map(..)` pair into one flat_map.)
        let mut buckets: Vec<Bucket> =
          qrs
          .into_iter()
          .flat_map(|qr| qr.buckets)
          .collect();
        buckets.sort_unstable();
        buckets
      });
  Box::new(fut)
}

/*
 Review notes:
 - Sorting was moved to the bucket level (it used to happen per query).
 - Consider converting body -> buckets earlier so raw bodies aren't passed around at all.
*/
