mod entity_cache;
mod err;
mod traits;
pub mod write;

use diesel::deserialize::FromSql;
use diesel::pg::Pg;
use diesel::serialize::{Output, ToSql};
use diesel::sql_types::Integer;
use diesel_derives::{AsExpression, FromSqlRow};
pub use entity_cache::{EntityCache, EntityLfuCache, GetScope, ModificationsAndCache};
use slog::Logger;
use tokio_stream::wrappers::ReceiverStream;

pub use super::subgraph::Entity;
pub use err::{StoreError, StoreResult};
use itertools::Itertools;
use strum_macros::Display;
pub use traits::*;
pub use write::Batch;

use serde::{Deserialize, Serialize};
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::fmt;
use std::fmt::Display;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::time::Duration;

use async_trait::async_trait;

use crate::blockchain::{Block, BlockHash, BlockPtr};
use crate::cheap_clone::CheapClone;
use crate::components::store::write::EntityModification;
use crate::data::store::scalar::Bytes;
use crate::data::store::{Id, IdList, Value};
use crate::data::value::Word;
use crate::data_source::CausalityRegion;
use crate::derive::CheapClone;
use crate::env::ENV_VARS;
use crate::internal_error;
use crate::prelude::{s, Attribute, DeploymentHash, ValueType};
use crate::schema::{ast as sast, EntityKey, EntityType, InputSchema};
use crate::util::stats::MovingStats;

/// Newtype flag recording whether a filtered field is derived (`true`)
/// or stored directly on the entity (`false`)
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct EntityFilterDerivative(bool);

impl EntityFilterDerivative {
    /// Wrap the given `derived` flag
    pub fn new(derived: bool) -> Self {
        EntityFilterDerivative(derived)
    }

    /// Whether the field the filter refers to is derived
    pub fn is_derived(&self) -> bool {
        let EntityFilterDerivative(derived) = self;
        *derived
    }
}

/// A request to load the entities related to a given entity through the
/// field `entity_field`
#[derive(Debug, Clone)]
pub struct LoadRelatedRequest {
    /// Name of the entity type.
    pub entity_type: EntityType,
    /// ID of the individual entity.
    pub entity_id: Id,
    /// The field that shall be loaded
    pub entity_field: Word,

    /// This is the causality region of the data source that created the entity.
    ///
    /// In the case of an entity lookup, this is the causality region of the data source that is
    /// doing the lookup. So if the entity exists but was created on a different causality region,
    /// the lookup will return empty.
    pub causality_region: CausalityRegion,
}

/// A lookup for entities of type `entity_type` whose `entity_field` holds
/// the id `value` (see `matches` below)
#[derive(Debug)]
pub struct DerivedEntityQuery {
    /// Name of the entity to search
    pub entity_type: EntityType,
    /// The field to check
    pub entity_field: Word,
    /// The value to compare against
    pub value: Id,

    /// This is the causality region of the data source that created the entity.
    ///
    /// In the case of an entity lookup, this is the causality region of the data source that is
    /// doing the lookup. So if the entity exists but was created on a different causality region,
    /// the lookup will return empty.
    pub causality_region: CausalityRegion,
}

impl DerivedEntityQuery {
    /// Checks if a given key and entity match this query: the key must have
    /// the same entity type and causality region, and the entity's
    /// `entity_field` must equal `self.value`. A missing field never matches.
    pub fn matches(&self, key: &EntityKey, entity: &Entity) -> bool {
        if key.entity_type != self.entity_type || key.causality_region != self.causality_region {
            return false;
        }
        match entity.get(&self.entity_field) {
            Some(value) => value == &self.value,
            None => false,
        }
    }
}

/// A filter on a related child entity: parents are selected based on
/// whether their children, connected through `attr`, satisfy `filter`
/// (displayed as `join on attr with entity_type(filter)`)
#[derive(Clone, Debug, PartialEq)]
pub struct Child {
    /// The attribute connecting parent and child entities
    pub attr: Attribute,
    /// The entity type of the child
    pub entity_type: EntityType,
    /// The filter the child entities must satisfy
    pub filter: Box<EntityFilter>,
    /// Whether the connecting field is derived
    pub derived: bool,
}

/// Supported types of store filters.
///
/// `NoCase` variants compare case-insensitively, and `Not*` variants
/// negate their counterparts; see the `Display` impl below for a concise
/// notation of each filter.
#[derive(Clone, Debug, PartialEq)]
pub enum EntityFilter {
    /// All subfilters must match
    And(Vec<EntityFilter>),
    /// At least one subfilter must match
    Or(Vec<EntityFilter>),
    Equal(Attribute, Value),
    Not(Attribute, Value),
    GreaterThan(Attribute, Value),
    LessThan(Attribute, Value),
    GreaterOrEqual(Attribute, Value),
    LessOrEqual(Attribute, Value),
    In(Attribute, Vec<Value>),
    NotIn(Attribute, Vec<Value>),
    Contains(Attribute, Value),
    ContainsNoCase(Attribute, Value),
    NotContains(Attribute, Value),
    NotContainsNoCase(Attribute, Value),
    StartsWith(Attribute, Value),
    StartsWithNoCase(Attribute, Value),
    NotStartsWith(Attribute, Value),
    NotStartsWithNoCase(Attribute, Value),
    EndsWith(Attribute, Value),
    EndsWithNoCase(Attribute, Value),
    NotEndsWith(Attribute, Value),
    NotEndsWithNoCase(Attribute, Value),
    /// Only entities changed at or after the given block
    ChangeBlockGte(BlockNumber),
    /// Filter through a related child entity
    Child(Child),
    /// Full-text search match on the given attribute
    Fulltext(Attribute, Value),
}

// A somewhat concise string representation of a filter
impl fmt::Display for EntityFilter {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use EntityFilter::*;

        match self {
            And(filters) => write!(f, "{}", filters.iter().format(" and ")),
            Or(filters) => write!(f, "{}", filters.iter().format(" or ")),
            Equal(attr, value) | Fulltext(attr, value) => write!(f, "{attr} = {value}"),
            Not(attr, value) => write!(f, "{attr} != {value}"),
            GreaterThan(attr, value) => write!(f, "{attr} > {value}"),
            LessThan(attr, value) => write!(f, "{attr} < {value}"),
            GreaterOrEqual(attr, value) => write!(f, "{attr} >= {value}"),
            LessOrEqual(attr, value) => write!(f, "{attr} <= {value}"),
            In(attr, values) => write!(f, "{attr} in ({})", values.iter().format(",")),
            NotIn(attr, values) => write!(f, "{attr} not in ({})", values.iter().format(",")),
            Contains(attr, value) => write!(f, "{attr} ~ *{value}*"),
            ContainsNoCase(attr, value) => write!(f, "{attr} ~ *{value}*i"),
            NotContains(attr, value) => write!(f, "{attr} !~ *{value}*"),
            NotContainsNoCase(attr, value) => write!(f, "{attr} !~ *{value}*i"),
            StartsWith(attr, value) => write!(f, "{attr} ~ ^{value}*"),
            StartsWithNoCase(attr, value) => write!(f, "{attr} ~ ^{value}*i"),
            NotStartsWith(attr, value) => write!(f, "{attr} !~ ^{value}*"),
            NotStartsWithNoCase(attr, value) => write!(f, "{attr} !~ ^{value}*i"),
            EndsWith(attr, value) => write!(f, "{attr} ~ *{value}$"),
            EndsWithNoCase(attr, value) => write!(f, "{attr} ~ *{value}$i"),
            NotEndsWith(attr, value) => write!(f, "{attr} !~ *{value}$"),
            NotEndsWithNoCase(attr, value) => write!(f, "{attr} !~ *{value}$i"),
            ChangeBlockGte(block) => write!(f, "block >= {block}"),
            Child(child) => write!(
                f,
                "join on {} with {}({})",
                child.attr, child.entity_type, child.filter
            ),
        }
    }
}

// Define some convenience methods
impl EntityFilter {
    /// An equality filter on `attribute_name`
    pub fn new_equal(
        attribute_name: impl Into<Attribute>,
        attribute_value: impl Into<Value>,
    ) -> Self {
        EntityFilter::Equal(attribute_name.into(), attribute_value.into())
    }

    /// A membership filter matching any of `attribute_values`
    pub fn new_in(
        attribute_name: impl Into<Attribute>,
        attribute_values: Vec<impl Into<Value>>,
    ) -> Self {
        let values: Vec<Value> = attribute_values.into_iter().map(Into::into).collect();
        EntityFilter::In(attribute_name.into(), values)
    }

    /// Conjoin `other` with this filter if it is present, flattening
    /// nested `And` filters so we don't build `And(And(..), ..)` towers
    pub fn and_maybe(self, other: Option<Self>) -> Self {
        use EntityFilter as f;
        let other = match other {
            None => return self,
            Some(other) => other,
        };
        match (self, other) {
            (f::And(mut left), f::And(mut right)) => {
                left.append(&mut right);
                f::And(left)
            }
            (f::And(mut left), single) => {
                left.push(single);
                f::And(left)
            }
            (single, f::And(mut right)) => {
                right.push(single);
                f::And(right)
            }
            (first, second) => f::And(vec![first, second]),
        }
    }
}

/// Holds the information needed to order the results of a query by an
/// attribute of a child entity.
#[derive(Clone, Debug, PartialEq)]
pub struct EntityOrderByChildInfo {
    /// The attribute of the child entity that is used to order the results.
    pub sort_by_attribute: Attribute,
    /// The attribute that is used to join to the parent and child entity.
    pub join_attribute: Attribute,
    /// If true, the child entity is derived from the parent entity.
    pub derived: bool,
}

/// Holds the information needed to order the results of a query based on nested entities.
#[derive(Clone, Debug, PartialEq)]
pub enum EntityOrderByChild {
    /// Order by a child of a concrete object type
    Object(EntityOrderByChildInfo, EntityType),
    /// Order by a child behind an interface; one entry per concrete type
    /// implementing the interface
    Interface(EntityOrderByChildInfo, Vec<EntityType>),
}

/// The order in which entities should be restored from a store.
#[derive(Clone, Debug, PartialEq)]
pub enum EntityOrder {
    /// Order ascending by the given attribute. Use `id` as a tie-breaker
    Ascending(String, ValueType),
    /// Order descending by the given attribute. Use `id` as a tie-breaker
    Descending(String, ValueType),
    /// Order ascending by the given attribute of a child entity. Use `id` as a tie-breaker
    ChildAscending(EntityOrderByChild),
    /// Order descending by the given attribute of a child entity. Use `id` as a tie-breaker
    ChildDescending(EntityOrderByChild),
    /// Order by the `id` of the entities
    Default,
    /// Do not order at all. This speeds up queries where we know that
    /// order does not matter
    Unordered,
}

/// How many entities to return, how many to skip etc.
#[derive(Clone, Debug, PartialEq)]
pub struct EntityRange {
    /// Limit on how many entities to return.
    pub first: Option<u32>,

    /// How many entities to skip.
    pub skip: u32,
}

impl EntityRange {
    /// The default value for `first` that we use when the user doesn't
    /// specify one
    pub const FIRST: u32 = 100;

    /// Query for the first `n` entities, skipping none.
    pub fn first(n: u32) -> Self {
        EntityRange {
            first: Some(n),
            skip: 0,
        }
    }
}

impl std::default::Default for EntityRange {
    /// The first `Self::FIRST` entities, skipping none
    fn default() -> Self {
        EntityRange::first(Self::FIRST)
    }
}

/// The attribute we want to window by in an `EntityWindow`. We have to
/// distinguish between scalar and list attributes since we need to use
/// different queries for them, and the JSONB storage scheme can not
/// determine that by itself
#[derive(Clone, Debug, PartialEq)]
pub enum WindowAttribute {
    Scalar(String),
    List(String),
}

impl WindowAttribute {
    /// The attribute's name, regardless of whether it is scalar or list
    pub fn name(&self) -> &str {
        match self {
            WindowAttribute::Scalar(name) | WindowAttribute::List(name) => name,
        }
    }
}

/// How to connect children to their parent when the child table does not
/// store parent id's
#[derive(Clone, Debug, PartialEq)]
pub enum ParentLink {
    /// The parent stores a list of child ids. The ith entry in the outer
    /// vector contains the id of the children for `EntityWindow.ids[i]`
    List(Vec<IdList>),
    /// The parent stores the id of one child. The ith entry in the
    /// vector contains the id of the child of the parent with id
    /// `EntityWindow.ids[i]`
    Scalar(IdList),
}

/// How many children a parent can have when the child stores
/// the id of the parent
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ChildMultiplicity {
    /// Each parent has at most one child
    Single,
    /// Parents can have multiple children
    Many,
}

impl ChildMultiplicity {
    /// Derive the multiplicity from a GraphQL field definition: list
    /// fields (nullable or not) mean `Many`, anything else `Single`
    pub fn new(field: &s::Field) -> Self {
        match sast::is_list_or_non_null_list_field(field) {
            true => ChildMultiplicity::Many,
            false => ChildMultiplicity::Single,
        }
    }
}

/// How to select children for their parents depending on whether the
/// child stores parent ids (`Direct`) or the parent
/// stores child ids (`Parent`)
#[derive(Clone, Debug, PartialEq)]
pub enum EntityLink {
    /// The parent id is stored in this child attribute
    Direct(WindowAttribute, ChildMultiplicity),
    /// Join with the parents table to get at the parent id
    Parent(EntityType, ParentLink),
}

/// Window results of an `EntityQuery` query along the parent's id:
/// the `order_by`, `order_direction`, and `range` of the query apply to
/// entities that belong to the same parent. Only entities that belong to
/// one of the parents listed in `ids` will be included in the query result.
///
/// Note that different windows can vary both by the entity type and id of
/// the children, but also by how to get from a child to its parent, i.e.,
/// it is possible that two windows access the same entity type, but look
/// at different attributes to connect to parent entities
#[derive(Clone, Debug, PartialEq)]
pub struct EntityWindow {
    /// The entity type for this window
    pub child_type: EntityType,
    /// The ids of parents that should be considered for this window
    pub ids: IdList,
    /// How to get the parent id
    pub link: EntityLink,
    /// Which columns of the child entity the query needs
    pub column_names: AttributeNames,
}

/// The base collections from which we are going to get entities for use in
/// `EntityQuery`; the result of the query comes from applying the query's
/// filter and order etc. to the entities described in this collection. For
/// a windowed collection order and range are applied to each individual
/// window
#[derive(Clone, Debug, PartialEq)]
pub enum EntityCollection {
    /// Use all entities of the given types
    All(Vec<(EntityType, AttributeNames)>),
    /// Use entities according to the windows. The set of entities that we
    /// apply order and range to is formed by taking all entities matching
    /// the window, and grouping them by the attribute of the window. Entities
    /// that have the same value in the `attribute` field of their window are
    /// grouped together. Note that it is possible to have one window for
    /// entity type `A` and attribute `a`, and another for entity type `B` and
    /// column `b`; they will be grouped by using `A.a` and `B.b` as the keys
    Window(Vec<EntityWindow>),
}

impl EntityCollection {
    /// Map each entity type in the collection to the columns the query
    /// needs for it, merging the column selections of windows that share
    /// the same child type
    pub fn entity_types_and_column_names(&self) -> BTreeMap<EntityType, AttributeNames> {
        let mut map = BTreeMap::new();
        match self {
            EntityCollection::All(pairs) => {
                for (entity_type, column_names) in pairs {
                    map.insert(entity_type.clone(), column_names.clone());
                }
            }
            EntityCollection::Window(windows) => {
                for window in windows {
                    match map.entry(window.child_type.clone()) {
                        Entry::Occupied(mut entry) => {
                            entry.get_mut().extend(window.column_names.clone())
                        }
                        Entry::Vacant(entry) => {
                            entry.insert(window.column_names.clone());
                        }
                    }
                }
            }
        }
        map
    }
}

/// The type we use for block numbers. This has to be a signed integer type
/// since Postgres does not support unsigned integer types. But 2G ought to
/// be enough for everybody
pub type BlockNumber = i32;

/// The highest representable block number; used to query at the latest
/// available block
// Use the associated constant instead of the soft-deprecated
// `std::i32::MAX` module constant
pub const BLOCK_NUMBER_MAX: BlockNumber = i32::MAX;

/// A query for entities in a store.
///
/// Details of how query generation for `EntityQuery` works can be found
/// at https://github.com/graphprotocol/rfcs/blob/master/engineering-plans/0001-graphql-query-prefetching.md
#[derive(Clone, Debug)]
pub struct EntityQuery {
    /// ID of the subgraph.
    pub subgraph_id: DeploymentHash,

    /// The block height at which to execute the query. Set this to
    /// `BLOCK_NUMBER_MAX` to run the query at the latest available block.
    /// If the subgraph uses JSONB storage, anything but `BLOCK_NUMBER_MAX`
    /// will cause an error as JSONB storage does not support querying anything
    /// but the latest block
    pub block: BlockNumber,

    /// The names of the entity types being queried. The result is the union
    /// (with repetition) of the query for each entity.
    pub collection: EntityCollection,

    /// Filter to filter entities by.
    pub filter: Option<EntityFilter>,

    /// How to order the entities
    pub order: EntityOrder,

    /// A range to limit the size of the result.
    pub range: EntityRange,

    /// Optional logger for anything related to this query
    pub logger: Option<Logger>,

    /// Optional identifier for correlating this query in logs
    pub query_id: Option<String>,

    /// Whether tracing was requested for this query
    // NOTE(review): presumably consumed by store implementations to collect
    // a query trace — confirm with callers
    pub trace: bool,

    // Private marker so instances can only be built through
    // `EntityQuery::new`
    _force_use_of_new: (),
}

impl EntityQuery {
    /// Create a query for `collection` in `subgraph_id` at `block` with
    /// no filter, default ordering and the default range
    pub fn new(
        subgraph_id: DeploymentHash,
        block: BlockNumber,
        collection: EntityCollection,
    ) -> Self {
        EntityQuery {
            subgraph_id,
            block,
            collection,
            filter: None,
            order: EntityOrder::Default,
            range: EntityRange::default(),
            logger: None,
            query_id: None,
            trace: false,
            _force_use_of_new: (),
        }
    }

    /// Set the filter (builder style)
    pub fn filter(mut self, filter: EntityFilter) -> Self {
        self.filter = Some(filter);
        self
    }

    /// Set the ordering (builder style)
    pub fn order(mut self, order: EntityOrder) -> Self {
        self.order = order;
        self
    }

    /// Set the range (builder style)
    pub fn range(mut self, range: EntityRange) -> Self {
        self.range = range;
        self
    }

    /// Limit the result to the first `first` entities (builder style)
    pub fn first(mut self, first: u32) -> Self {
        self.range.first = Some(first);
        self
    }

    /// Skip the first `skip` entities (builder style)
    pub fn skip(mut self, skip: u32) -> Self {
        self.range.skip = skip;
        self
    }

    /// If there is one window, with one id, in a direct relation to the
    /// entities, we can simplify the query by changing the filter and
    /// getting rid of the window
    pub fn simplify(mut self) -> Self {
        // Figure out the replacement filter and collection without
        // mutating `self` while its collection is still borrowed
        let rewrite = match &self.collection {
            EntityCollection::Window(windows) if windows.len() == 1 => {
                let window = &windows[0];
                match (&window.link, window.ids.len()) {
                    (EntityLink::Direct(attribute, _), 1) => {
                        let id = window.ids.first().expect("we just checked").to_value();
                        let filter = match attribute {
                            WindowAttribute::Scalar(name) => {
                                EntityFilter::Equal(name.clone(), id.into())
                            }
                            WindowAttribute::List(name) => {
                                EntityFilter::Contains(name.clone(), Value::from(vec![id]))
                            }
                        };
                        let collection = EntityCollection::All(vec![(
                            window.child_type.clone(),
                            window.column_names.clone(),
                        )]);
                        Some((filter, collection))
                    }
                    _ => None,
                }
            }
            _ => None,
        };
        if let Some((filter, collection)) = rewrite {
            self.filter = Some(filter.and_maybe(self.filter));
            self.collection = collection;
        }
        self
    }
}

/// Operation types that lead to changes in assignments
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "lowercase")]
pub enum AssignmentOperation {
    /// An assignment was added or updated
    Set,
    /// An assignment was removed.
    Removed,
}

/// Assignment change events emitted by [Store](trait.Store.html) implementations.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct AssignmentChange {
    // The deployment whose assignment changed
    deployment: DeploymentLocator,
    // Whether the assignment was set or removed
    operation: AssignmentOperation,
}

impl AssignmentChange {
    fn new(deployment: DeploymentLocator, operation: AssignmentOperation) -> Self {
        Self {
            deployment,
            operation,
        }
    }

    pub fn set(deployment: DeploymentLocator) -> Self {
        Self::new(deployment, AssignmentOperation::Set)
    }

    pub fn removed(deployment: DeploymentLocator) -> Self {
        Self::new(deployment, AssignmentOperation::Removed)
    }

    pub fn into_parts(self) -> (DeploymentLocator, AssignmentOperation) {
        (self.deployment, self.operation)
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// The store emits `StoreEvents` to indicate that some entities have changed.
/// For block-related data, at most one `StoreEvent` is emitted for each block
/// that is processed. The `changes` vector contains the details of what changes
/// were made, and to which entity.
///
/// Since the 'subgraph of subgraphs' is special, and not directly related to
/// any specific blocks, `StoreEvents` for it are generated as soon as they are
/// written to the store.
pub struct StoreEvent {
    // The tag is only there to make it easier to track StoreEvents in the
    // logs as they flow through the system
    pub tag: usize,
    /// The assignment changes this event carries; equality of events is
    /// based solely on this set (see the `PartialEq` impl below)
    pub changes: HashSet<AssignmentChange>,
}

impl StoreEvent {
    /// Build an event from `changes`, deduplicating them into a set
    pub fn new(changes: Vec<AssignmentChange>) -> StoreEvent {
        StoreEvent::from_set(changes.into_iter().collect())
    }

    fn from_set(changes: HashSet<AssignmentChange>) -> StoreEvent {
        // Tags only serve to trace events through logs; a relaxed global
        // counter is all we need
        static NEXT_TAG: AtomicUsize = AtomicUsize::new(0);

        StoreEvent {
            tag: NEXT_TAG.fetch_add(1, Ordering::Relaxed),
            changes,
        }
    }

    /// Merge the changes from `other` into this event, keeping this
    /// event's tag
    pub fn extend(mut self, other: StoreEvent) -> Self {
        let StoreEvent { changes, .. } = other;
        self.changes.extend(changes);
        self
    }
}

impl fmt::Display for StoreEvent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "StoreEvent[{}](changes: {})",
            self.tag,
            self.changes.len()
        )
    }
}

impl PartialEq for StoreEvent {
    fn eq(&self, other: &StoreEvent) -> bool {
        // Two events are equal when they carry the same changes; the tag
        // is deliberately ignored since it only serves log tracing
        self.changes.eq(&other.changes)
    }
}

/// A boxed `StoreEventStream`: a channel-backed stream of shared
/// `StoreEvent`s
pub type StoreEventStreamBox = ReceiverStream<Arc<StoreEvent>>;

/// An entity operation that can be transacted into the store.
#[derive(Clone, Debug, PartialEq)]
pub enum EntityOperation {
    /// Locates the entity specified by `key` and sets its attributes according to the contents of
    /// `data`.  If no entity exists with this key, creates a new entity.
    Set { key: EntityKey, data: Entity },

    /// Removes an entity with the specified key, if one exists.
    Remove { key: EntityKey },
}

/// The outcome of an attempt to unfail a deployment
#[derive(Debug, PartialEq)]
pub enum UnfailOutcome {
    /// Nothing was changed
    Noop,
    /// The deployment was unfailed
    Unfailed,
}

/// The form in which a dynamic data source is stored in the database
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct StoredDynamicDataSource {
    // Index of the data source template in the manifest — TODO confirm
    pub manifest_idx: u32,
    // Opaque parameter for instantiating the data source (e.g. an address)
    pub param: Option<Bytes>,
    // Arbitrary JSON context attached to the data source
    pub context: Option<serde_json::Value>,
    // Block at which the data source was created, if known
    pub creation_block: Option<BlockNumber>,
    // Block at which the data source finished, if it has — TODO confirm
    pub done_at: Option<i32>,
    // The causality region the data source belongs to
    pub causality_region: CausalityRegion,
}

/// An internal identifier for the specific instance of a deployment. The
/// identifier only has meaning in the context of a specific instance of
/// graph-node. Only store code should ever construct or consume it; all
/// other code passes it around as an opaque token.
///
/// Stored as a plain `integer` in Postgres (see the `FromSql`/`ToSql`
/// impls below)
#[derive(
    Copy,
    Clone,
    CheapClone,
    Debug,
    Serialize,
    Deserialize,
    PartialEq,
    Eq,
    Hash,
    AsExpression,
    FromSqlRow,
)]
#[diesel(sql_type = Integer)]
pub struct DeploymentId(pub i32);

impl Display for DeploymentId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        write!(f, "{}", self.0)
    }
}

impl DeploymentId {
    pub fn new(id: i32) -> Self {
        Self(id)
    }
}

impl FromSql<Integer, Pg> for DeploymentId {
    fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result<Self> {
        let id = <i32 as FromSql<Integer, Pg>>::from_sql(bytes)?;
        Ok(DeploymentId(id))
    }
}

impl ToSql<Integer, Pg> for DeploymentId {
    /// Serialize as the underlying `i32`
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> diesel::serialize::Result {
        <i32 as ToSql<Integer, Pg>>::to_sql(&self.0, out)
    }
}

/// A unique identifier for a deployment that specifies both its external
/// identifier (`hash`) and its unique internal identifier (`id`) which
/// ensures we are talking about a unique location for the deployment's data
/// in the store
#[derive(Clone, CheapClone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct DeploymentLocator {
    /// The internal, store-specific identifier
    pub id: DeploymentId,
    /// The external deployment hash
    pub hash: DeploymentHash,
}

impl slog::Value for DeploymentLocator {
    /// Log the locator in its `Display` form (`hash[id]`)
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        slog::Value::serialize(&self.to_string(), record, key, serializer)
    }
}

impl DeploymentLocator {
    pub fn new(id: DeploymentId, hash: DeploymentHash) -> Self {
        Self { id, hash }
    }
}

impl Display for DeploymentLocator {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}[{}]", self.hash, self.id)
    }
}

// The type that the connection pool uses to track wait times for
// connection checkouts; shared between threads, hence the `Arc<RwLock<_>>`
pub type PoolWaitStats = Arc<RwLock<MovingStats>>;

/// Determines which columns should be selected in a table.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum AttributeNames {
    /// Select all columns. Equivalent to a `"SELECT *"`.
    All,
    /// Individual column names to be selected.
    Select(BTreeSet<String>),
}

impl AttributeNames {
    /// Add `column_name` to the selection. If we were selecting all
    /// columns, narrow the selection to just this column.
    fn insert(&mut self, column_name: &str) {
        match self {
            AttributeNames::All => {
                let mut set = BTreeSet::new();
                set.insert(column_name.to_string());
                *self = AttributeNames::Select(set)
            }
            AttributeNames::Select(set) => {
                set.insert(column_name.to_string());
            }
        }
    }

    /// Adds an attribute name. Ignores meta fields.
    // Kept as a separate public entry point for backwards compatibility;
    // it is just an alias for `add_str`
    pub fn update(&mut self, field_name: &str) {
        self.add_str(field_name)
    }

    /// Adds an attribute name. Ignores meta fields.
    pub fn add_str(&mut self, field_name: &str) {
        if Self::is_meta_field(field_name) {
            return;
        }
        self.insert(field_name);
    }

    /// Returns `true` for meta field names (those starting with `"__"`),
    /// `false` otherwise.
    fn is_meta_field(field_name: &str) -> bool {
        field_name.starts_with("__")
    }

    /// Merge `other` into this selection; merging `All` into an existing
    /// `Select` is considered a programming error
    pub fn extend(&mut self, other: Self) {
        use AttributeNames::*;
        match (self, other) {
            (All, All) => {}
            (self_ @ All, other @ Select(_)) => *self_ = other,
            (Select(_), All) => {
                unreachable!()
            }
            (Select(a), Select(b)) => a.extend(b),
        }
    }
}

/// A block pointer where the hash may be unknown, e.g., because only the
/// block number is available
#[derive(Debug, Clone)]
pub struct PartialBlockPtr {
    pub number: BlockNumber,
    pub hash: Option<BlockHash>,
}

impl From<BlockNumber> for PartialBlockPtr {
    /// Construct a pointer with a known number but unknown hash
    fn from(number: BlockNumber) -> Self {
        Self { number, hash: None }
    }
}

/// The version of the database schema used for a deployment; see the
/// `TryFrom<i32>` impl below for how stored version numbers are decoded
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum DeploymentSchemaVersion {
    /// V0, baseline version, in which:
    /// - A relational schema is used.
    /// - Each deployment has its own namespace for entity tables.
    /// - Dynamic data sources are stored in `subgraphs.dynamic_ethereum_contract_data_source`.
    V0 = 0,

    /// V1: Dynamic data sources moved to `sgd*.data_sources$`.
    V1 = 1,
}

impl DeploymentSchemaVersion {
    // Latest schema version supported by this version of graph node.
    pub const LATEST: Self = Self::V1;

    pub fn private_data_sources(self) -> bool {
        use DeploymentSchemaVersion::*;
        match self {
            V0 => false,
            V1 => true,
        }
    }
}

impl TryFrom<i32> for DeploymentSchemaVersion {
    type Error = StoreError;

    /// Decode a version number as stored in the database
    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(Self::V0),
            1 => Ok(Self::V1),
            unknown => Err(StoreError::UnsupportedDeploymentSchemaVersion(unknown)),
        }
    }
}

impl fmt::Display for DeploymentSchemaVersion {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&(*self as i32), f)
    }
}

/// A `ReadStore` that is always empty.
pub struct EmptyStore {
    // The schema to hand out from `input_schema`
    schema: InputSchema,
}

impl EmptyStore {
    /// Create an empty store that reports `schema` as its input schema
    pub fn new(schema: InputSchema) -> Self {
        EmptyStore { schema }
    }
}

#[async_trait]
impl ReadStore for EmptyStore {
    /// Always `Ok(None)`: the empty store holds no entities
    async fn get(&self, _key: &EntityKey) -> Result<Option<Entity>, StoreError> {
        Ok(None)
    }

    /// Always an empty map
    async fn get_many(
        &self,
        _: BTreeSet<EntityKey>,
    ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> {
        Ok(BTreeMap::new())
    }

    /// Always an empty map
    async fn get_derived(
        &self,
        _query: &DerivedEntityQuery,
    ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> {
        Ok(BTreeMap::new())
    }

    /// The schema the store was created with
    fn input_schema(&self) -> InputSchema {
        self.schema.cheap_clone()
    }
}

/// An estimate of the number of entities and the number of entity versions
/// in a database table
#[derive(Clone, Debug)]
pub struct VersionStats {
    /// Estimated number of distinct entities in the table
    pub entities: i64,
    /// Estimated number of entity versions (rows) in the table
    pub versions: i64,
    /// The name of the table these stats describe
    pub tablename: String,
    /// The ratio `entities / versions`
    pub ratio: f64,
    /// The last block to which this table was pruned
    pub last_pruned_block: Option<BlockNumber>,
    /// Histograms for the upper bounds of the block ranges in
    /// this table. Each histogram bucket contains roughly the same number
    /// of rows; values might be repeated to achieve that. The vectors are
    /// empty if the table hasn't been analyzed, the subgraph is stored in
    /// Postgres version 16 or lower, or if the table doesn't have a
    /// block_range column.
    pub block_range_upper: Vec<BlockNumber>,
}

/// What phase of pruning we are working on
pub enum PrunePhase {
    /// Handling final entities
    CopyFinal,
    /// Handling nonfinal entities
    CopyNonfinal,
    /// Delete unneeded entity versions
    Delete,
}

impl PrunePhase {
    pub fn strategy(&self) -> PruningStrategy {
        match self {
            PrunePhase::CopyFinal | PrunePhase::CopyNonfinal => PruningStrategy::Rebuild,
            PrunePhase::Delete => PruningStrategy::Delete,
        }
    }
}

/// Callbacks for `SubgraphStore.prune` so that callers can report progress
/// of the pruning procedure to users
#[allow(unused_variables)]
pub trait PruneReporter: Send + 'static {
    /// A pruning run has started; `req` describes the strategy, how much
    /// history will be removed, and the block range the deployment
    /// currently covers
    fn start(&mut self, req: &PruneRequest) {}

    /// Analyzing tables is about to start
    fn start_analyze(&mut self) {}
    /// Analysis of `table` is about to start
    fn start_analyze_table(&mut self, table: &str) {}
    /// Analysis of `table` has finished
    fn finish_analyze_table(&mut self, table: &str) {}

    /// Analyzing tables has finished. `stats` are the stats for all tables
    /// in the deployment, `analyzed` are the names of the tables that were
    /// actually analyzed
    fn finish_analyze(&mut self, stats: &[VersionStats], analyzed: &[&str]) {}

    /// Pruning of `table` is about to start
    fn start_table(&mut self, table: &str) {}
    /// A batch of `rows` rows in `table` was processed during `phase`;
    /// `finished` presumably marks the final batch of the phase — confirm
    /// in the store implementation
    fn prune_batch(&mut self, table: &str, rows: usize, phase: PrunePhase, finished: bool) {}
    /// Swapping rebuilt tables into place is about to start
    fn start_switch(&mut self) {}
    /// Swapping rebuilt tables into place has finished
    fn finish_switch(&mut self) {}
    /// Pruning of `table` has finished
    fn finish_table(&mut self, table: &str) {}

    /// The entire pruning run has finished
    fn finish(&mut self) {}
}

/// Select how pruning should be done
#[derive(Clone, Copy, Debug, Display, PartialEq)]
pub enum PruningStrategy {
    /// Rebuild by copying the data we want to keep to new tables and swap
    /// them out for the existing tables
    Rebuild,
    /// Delete unneeded data from the existing tables
    Delete,
}

#[derive(Copy, Clone)]
/// A request to prune a deployment. This struct encapsulates decision
/// making around the best strategy for pruning (deleting historical
/// entities or copying current ones). It needs to be filled with accurate
/// information about the deployment that should be pruned.
pub struct PruneRequest {
    /// How many blocks of history to keep
    pub history_blocks: BlockNumber,
    /// The reorg threshold for the chain the deployment is on
    pub reorg_threshold: BlockNumber,
    /// The earliest block pruning should preserve
    pub earliest_block: BlockNumber,
    /// The last block that contains final entities not subject to a reorg
    pub final_block: BlockNumber,
    /// The first block for which the deployment contained entities when the
    /// request was made
    pub first_block: BlockNumber,
    /// The latest block, i.e., the subgraph head
    pub latest_block: BlockNumber,
    /// Use the rebuild strategy when removing more than this fraction of
    /// history. Initialized from `ENV_VARS.store.rebuild_threshold`, but
    /// can be modified after construction
    pub rebuild_threshold: f64,
    /// Use the delete strategy when removing more than this fraction of
    /// history but less than `rebuild_threshold`. Initialized from
    /// `ENV_VARS.store.delete_threshold`, but can be modified after
    /// construction
    pub delete_threshold: f64,
}

impl PruneRequest {
    /// Create a `PruneRequest` for a deployment that currently contains
    /// entities for blocks from `first_block` to `latest_block` that should
    /// retain only `history_blocks` blocks of history and is subject to a
    /// reorg threshold of `reorg_threshold`.
    ///
    /// Returns an error if the thresholds configured in the environment
    /// fall outside `[0, 1]`, if `history_blocks` does not exceed
    /// `reorg_threshold`, or if `first_block` is not before `latest_block`.
    pub fn new(
        deployment: &DeploymentLocator,
        history_blocks: BlockNumber,
        reorg_threshold: BlockNumber,
        first_block: BlockNumber,
        latest_block: BlockNumber,
    ) -> Result<Self, StoreError> {
        let rebuild_threshold = ENV_VARS.store.rebuild_threshold;
        let delete_threshold = ENV_VARS.store.delete_threshold;
        // Both thresholds are fractions of removed history and must lie
        // in [0, 1]
        if !(0.0..=1.0).contains(&rebuild_threshold) {
            return Err(internal_error!(
                "the rebuild threshold must be between 0 and 1 but is {rebuild_threshold}"
            ));
        }
        if !(0.0..=1.0).contains(&delete_threshold) {
            return Err(internal_error!(
                "the delete threshold must be between 0 and 1 but is {delete_threshold}"
            ));
        }
        // We must always keep more blocks than the reorg threshold so
        // that reverts within the threshold remain possible
        if history_blocks <= reorg_threshold {
            return Err(internal_error!(
                "the deployment {} needs to keep at least {} blocks \
                   of history and can't be pruned to only {} blocks of history",
                deployment,
                reorg_threshold + 1,
                history_blocks
            ));
        }
        if first_block >= latest_block {
            return Err(internal_error!(
                "the earliest block {} must be before the latest block {}",
                first_block,
                latest_block
            ));
        }

        let earliest_block = latest_block - history_blocks;
        let final_block = latest_block - reorg_threshold;

        Ok(Self {
            history_blocks,
            reorg_threshold,
            earliest_block,
            final_block,
            latest_block,
            first_block,
            rebuild_threshold,
            delete_threshold,
        })
    }

    /// Determine what strategy to use for pruning
    ///
    /// We are pruning `history_pct` of the blocks from a table that has a
    /// ratio of `stats.ratio` entities to versions. If we are removing
    /// more than `rebuild_threshold` percent of the versions, we prune by
    /// rebuilding, and if we are removing more than `delete_threshold`
    /// percent of the versions, we prune by deleting. If we would remove
    /// less than `delete_threshold` percent of the versions, we don't
    /// prune and return `None`.
    pub fn strategy(&self, stats: &VersionStats) -> Option<PruningStrategy> {
        // If the deployment doesn't have enough history to cover the reorg
        // threshold, do not prune
        if self.earliest_block >= self.final_block {
            return None;
        }

        let removal_ratio = if stats.block_range_upper.is_empty()
            || ENV_VARS.store.prune_disable_range_bound_estimation
        {
            // Estimate how much data we will throw away; we assume that
            // entity versions are distributed evenly across all blocks so
            // that `history_pct` will tell us how much of that data pruning
            // will remove.
            self.history_pct(stats) * (1.0 - stats.ratio)
        } else {
            // This estimate is more accurate than the one above since it
            // does not assume anything about the distribution of entities
            // and versions but uses the estimates from Postgres statistics.
            // Of course, we can only use it if we have statistics
            self.remove_pct_from_bounds(stats)
        };

        if removal_ratio >= self.rebuild_threshold {
            Some(PruningStrategy::Rebuild)
        } else if removal_ratio >= self.delete_threshold {
            Some(PruningStrategy::Delete)
        } else {
            None
        }
    }

    /// Return an estimate of the fraction of the entities that are
    /// historical in the table whose `stats` we are given. Despite the
    /// name, the result is a fraction in `[0, 1]`, not a percentage
    fn history_pct(&self, stats: &VersionStats) -> f64 {
        let total_blocks = self.latest_block - stats.last_pruned_block.unwrap_or(0);
        if total_blocks <= 0 || total_blocks < self.history_blocks {
            // Something has gone very wrong; this could happen if the
            // subgraph is ever rewound to before the last_pruned_block or
            // if this is called when the subgraph has fewer blocks than
            // history_blocks. In both cases, which should be transient,
            // pretend that we would not delete any history
            0.0
        } else {
            1.0 - self.history_blocks as f64 / total_blocks as f64
        }
    }

    /// Return the fraction of entities that we will remove according to the
    /// histogram bounds in `stats`. That fraction can be estimated as the
    /// fraction of histogram buckets that end before `self.earliest_block`.
    ///
    /// Callers must ensure `stats.block_range_upper` is non-empty;
    /// `strategy` only calls this in that case, which avoids a division
    /// by zero
    fn remove_pct_from_bounds(&self, stats: &VersionStats) -> f64 {
        stats
            .block_range_upper
            .iter()
            .filter(|b| **b <= self.earliest_block)
            .count() as f64
            / stats.block_range_upper.len() as f64
    }
}

/// Represents an item retrieved from an
/// [`EthereumCallCache`](super::EthereumCallCache) implementor.
pub struct CachedEthereumCall {
    /// The BLAKE3 hash that uniquely represents this cache item. The way this
    /// hash is constructed is an implementation detail.
    pub blake3_id: Vec<u8>,

    /// Block details related to this Ethereum call.
    pub block_ptr: BlockPtr,

    /// The address of the called contract.
    pub contract_address: ethabi::Address,

    /// The encoded return value of this call.
    pub return_value: Vec<u8>,
}
