Add collection info, creation, deletion, points insertion

Andrey Tkachenko 2022-04-04 20:00:00 +04:00
commit cbb8e48392
8 changed files with 2287 additions and 0 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
/target

1064
Cargo.lock generated Normal file

File diff suppressed because it is too large

16
Cargo.toml Normal file

@@ -0,0 +1,16 @@
[package]
name = "qdrant_rust_client"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
serde = "1.0.136"
serde_derive = "1.0.136"
serde_json = "1.0.79"
thiserror = "1.0.30"
reqwest = { version = "0.11.10", features = ["json"] }

[dev-dependencies]
tokio = { version = "1.17.0", features = ["full"] }

65
examples/get_info.rs Normal file

@@ -0,0 +1,65 @@
use qdrant_rust_client::{
    models::{Distance, PointStruct},
    Client,
};

#[tokio::main]
async fn main() {
let client = Client::new("localhost", 6333, "http");
let coll = "test_collection";
    let created = client
        .create_collection(coll, 8, Distance::Cosine)
        .await
        .unwrap();
    println!("created: {}", created);

let points = vec![
PointStruct {
id: 1.into(),
payload: None,
vector: vec![1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
},
PointStruct {
id: 2.into(),
payload: None,
vector: vec![1.0, 1.0, 2.0, 4.0, 4.0, 5.0, 6.0, 8.0],
},
PointStruct {
id: 3.into(),
payload: None,
vector: vec![0.0, 1.0, 3.0, 3.0, 4.0, 5.0, 6.0, 8.0],
},
PointStruct {
id: 4.into(),
payload: None,
vector: vec![0.0, 1.0, 4.0, 3.0, 4.0, 5.0, 6.0, 7.0],
},
PointStruct {
id: 5.into(),
payload: None,
vector: vec![1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
},
PointStruct {
id: 6.into(),
payload: None,
vector: vec![0.0, 2.0, 2.0, 4.0, 4.0, 5.0, 6.0, 7.0],
},
PointStruct {
id: 7.into(),
payload: None,
vector: vec![1.0, 3.0, 2.0, 4.0, 5.0, 5.0, 7.0, 7.0],
},
PointStruct {
id: 8.into(),
payload: None,
vector: vec![0.0, 4.0, 2.0, 3.0, 5.0, 5.0, 7.0, 7.0],
},
];
    let resp = client.collection_add_points(coll, points).await.unwrap();
    println!("{:#?}", resp);

    let info = client.get_collection_info(coll).await.unwrap();
    println!("{:#?}", info);

    let deleted = client.delete_collection(coll).await.unwrap();
    println!("deleted: {}", deleted);
}
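
With a Qdrant instance listening on localhost:6333 (the default), this example runs via cargo run --example get_info; it creates a collection, inserts eight points, reads the collection info, and deletes the collection again.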

10
src/error.rs Normal file

@@ -0,0 +1,10 @@
use thiserror::Error;
#[derive(Debug, Error)]
pub enum Error {
#[error("Reqwest Error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("{0}")]
QDrantError(String),
}
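
Because of the #[from] attribute, the ? operator lifts any reqwest::Error into Error::Reqwest. A minimal illustration (hypothetical function, not part of this commit):

use qdrant_rust_client::error::Error;

// Any reqwest failure propagates through `?` as Error::Reqwest.
async fn fetch_text(url: &str) -> Result<String, Error> {
    Ok(reqwest::get(url).await?.text().await?)
}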

146
src/lib.rs Normal file

@@ -0,0 +1,146 @@
use std::borrow::Cow;

use error::Error;
use models::{
    CollectionInfo, CollectionParams, Distance, Filter, InlineResponse, InlineResponseResult,
    InlineResponseStatus, PointStruct, PointsList, UpdateResult,
};
pub mod error;
pub mod models;
pub struct Client {
host: Cow<'static, str>,
port: u16,
schema: Cow<'static, str>,
}
impl Client {
pub fn new<H: Into<Cow<'static, str>>, S: Into<Cow<'static, str>>>(
host: H,
port: u16,
schema: S,
) -> Self {
Self {
host: host.into(),
port,
schema: schema.into(),
}
}
pub async fn get_collection_info(&self, collection: &str) -> Result<CollectionInfo, Error> {
let url = self.build_url(collection, "");
let res = reqwest::get(&url).await?;
let res: InlineResponse = res.json().await?;
match res.status {
InlineResponseStatus::Error(err) => {
                Err(Error::QDrantError(err.error.unwrap_or_default()))
}
InlineResponseStatus::Ok(_) => match res.result {
Some(inner) => match inner {
InlineResponseResult::CollectionInfo(info) => Ok(info),
_ => unreachable!(),
},
None => unimplemented!(),
},
}
}
pub async fn create_collection(
&self,
collection: &str,
vector_size: i64,
distance: Distance,
) -> Result<bool, Error> {
let req = CollectionParams {
vector_size,
distance,
};
let url = self.build_url(collection, "");
let client = reqwest::Client::new();
let res: InlineResponse = client.put(&url).json(&req).send().await?.json().await?;
match res.status {
InlineResponseStatus::Error(err) => {
                Err(Error::QDrantError(err.error.unwrap_or_default()))
}
InlineResponseStatus::Ok(_) => match res.result {
Some(inner) => match inner {
InlineResponseResult::Bool(resp) => Ok(resp),
_ => unreachable!(),
},
None => unimplemented!(),
},
}
}
pub async fn delete_collection(&self, collection: &str) -> Result<bool, Error> {
let url = self.build_url(collection, "");
let client = reqwest::Client::new();
let res: InlineResponse = client.delete(&url).send().await?.json().await?;
match res.status {
InlineResponseStatus::Error(err) => {
                Err(Error::QDrantError(err.error.unwrap_or_default()))
}
InlineResponseStatus::Ok(_) => match res.result {
Some(inner) => match inner {
InlineResponseResult::Bool(resp) => Ok(resp),
_ => unreachable!(),
},
None => unimplemented!(),
},
}
}
pub async fn collection_add_points(
&self,
collection: &str,
points: Vec<PointStruct>,
) -> Result<UpdateResult, Error> {
let req = PointsList { points };
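        // `wait=true` asks the server to apply the operation before responding,
        // so the returned UpdateResult describes a finished update rather than a queued one.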
let url = self.build_url(collection, "/points?wait=true");
let client = reqwest::Client::new();
let res: InlineResponse = client.put(&url).json(&req).send().await?.json().await?;
match res.status {
InlineResponseStatus::Error(err) => {
                Err(Error::QDrantError(err.error.unwrap_or_default()))
}
InlineResponseStatus::Ok(_) => match res.result {
Some(inner) => match inner {
InlineResponseResult::UpdateResult(resp) => Ok(resp),
_ => unreachable!(),
},
None => unimplemented!(),
},
}
}
    pub async fn collection_search_points(
        &self,
        _vector: Vec<f64>,
        _top: u64,
        _filter: Option<Filter>,
    ) -> Result<(), Error> {
        // TODO: not implemented yet
        Ok(())
    }
    // TODO: batch insertion, roughly:
    // pub async fn collection_add_points_batch(&self, batch_ids: &[ExtendedPointId], batch_vectors: &[Vec<f64>], batch_payloads: Option<Vec<HashMap<String, Value>>>) -> Result<(), Error> {
    // }
fn build_url(&self, collection: &str, post: &str) -> String {
format!(
"{}://{}:{}/collections/{collection}{post}",
self.schema, self.host, self.port
)
}
}
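
Each endpoint method repeats the same status/result match. A helper along these lines could factor that handling out, and also turn unexpected response shapes into errors instead of unreachable!()/unimplemented!() panics (a sketch only - expect_result is a hypothetical name, not part of this commit):

// Hypothetical helper for src/lib.rs: collapses the repeated response handling.
use crate::error::Error;
use crate::models::{InlineResponse, InlineResponseResult, InlineResponseStatus};

fn expect_result<T>(
    res: InlineResponse,
    extract: impl FnOnce(InlineResponseResult) -> Option<T>,
) -> Result<T, Error> {
    match res.status {
        // Surface the server-reported error message, if any.
        InlineResponseStatus::Error(err) => {
            Err(Error::QDrantError(err.error.unwrap_or_default()))
        }
        // On success, pull the expected variant out of the result payload.
        InlineResponseStatus::Ok(_) => res
            .result
            .and_then(extract)
            .ok_or_else(|| Error::QDrantError("unexpected response body".into())),
    }
}

With it, delete_collection would reduce to expect_result(res, |r| match r { InlineResponseResult::Bool(b) => Some(b), _ => None }).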

982
src/models/collection.rs Normal file

@@ -0,0 +1,982 @@
use serde_derive::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
#[derive(Debug, Clone, Deserialize)]
pub enum OkStatus {
#[serde(rename = "ok")]
Ok,
}
#[derive(Debug, Clone, Deserialize)]
pub enum CollectionStatus {
#[serde(rename = "green")]
Green,
#[serde(rename = "yellow")]
Yellow,
#[serde(rename = "red")]
Red,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Distance {
Cosine,
Euclid,
Dot,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Match {
MatchKeyword(MatchKeyword),
MatchInteger(MatchInteger),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AliasOperations {
CreateAliasOperation(CreateAliasOperation),
DeleteAliasOperation(DeleteAliasOperation),
RenameAliasOperation(RenameAliasOperation),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExtendedPointId {
Integer(i64),
String(String),
}
impl From<i64> for ExtendedPointId {
fn from(val: i64) -> Self {
Self::Integer(val)
}
}
impl From<String> for ExtendedPointId {
fn from(val: String) -> Self {
Self::String(val)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PointsSelector {
PointIdsList(PointIdsList),
FilterSelector(FilterSelector),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PayloadSelector {
PayloadSelectorInclude(PayloadSelectorInclude),
PayloadSelectorExclude(PayloadSelectorExclude),
}
#[derive(Debug, Clone, Deserialize)]
pub enum WithPayloadInterface {
PayloadSelector(PayloadSelector),
StringList(Vec<String>),
Bool(bool),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Condition {
FieldCondition(FieldCondition),
HasIdCondition(HasIdCondition),
Filter(Filter),
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum PointInsertOperations {
PointsBatch(PointsBatch),
PointsList(PointsList),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Batch {
pub ids: Vec<ExtendedPointId>,
pub payloads: Option<Vec<HashMap<String, Value>>>,
pub vectors: Vec<Vec<f64>>,
}
///
/// Operation for performing changes of collection aliases. Alias changes are atomic, meaning
/// that no collection modifications can happen between alias operations.
///
#[derive(Debug, Clone, Deserialize)]
pub struct ChangeAliasesOperation {
    /// List of alias change operations to apply
    pub actions: Vec<AliasOperations>,
}
#[derive(Debug, Clone, Deserialize)]
pub enum UpdateStatus {
    /// Operation is accepted and written to WAL, but changes are not applied yet
    #[serde(rename = "acknowledged")]
    Acknowledged,
    /// Operation is applied and changes are visible
    #[serde(rename = "completed")]
    Completed,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CollectionParams {
    /// Size of the vectors used
    pub vector_size: i64,
pub distance: Distance,
}
#[derive(Debug, Clone, Deserialize)]
pub struct WalConfig {
///
/// Size of a single WAL segment in MB
///
wal_capacity_mb: i64,
///
/// Number of WAL segments to create ahead of actually used ones
///
wal_segments_ahead: i64,
}
#[derive(Debug, Clone, Deserialize)]
pub struct WalConfigDiff {
    /// Size of a single WAL segment in MB
    wal_capacity_mb: Option<i64>,
    /// Number of WAL segments to create ahead of actually used ones
    wal_segments_ahead: Option<i64>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CollectionDescription {
name: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CollectionsResponse {
collections: Vec<CollectionDescription>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CollectionConfig {
hnsw_config: HnswConfig,
params: CollectionParams,
optimizer_config: OptimizersConfig,
wal_config: WalConfig,
}
///
/// Current statistics and configuration of the collection
///
#[derive(Debug, Clone, Deserialize)]
pub struct CollectionInfo {
    /// Current status of the collection
    status: CollectionStatus,
    /// Status of the collection optimizers
    optimizer_status: OptimizersStatus,
    /// Number of vectors in collection
    vectors_count: i64,
    /// Number of segments in collection
    segments_count: i64,
    /// Disk space used by collection
    disk_data_size: i64,
    /// RAM used by collection
    ram_data_size: i64,
    /// Collection configuration
    config: CollectionConfig,
    /// Types of stored payload
    payload_schema: HashMap<String, PayloadSchemaInfo>,
}
///
/// Create an alternative name for a collection. The collection will be available under both
/// names for search and retrieval.
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateAlias {
    /// New alias name
    alias_name: String,
    /// Name of the collection to alias
    collection_name: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateAliasOperation {
create_alias: CreateAlias,
}
///
/// Operation for creating a new collection and (optionally) specifying index params
///
#[derive(Debug, Clone, Deserialize)]
pub struct CreateCollection {
    /// Distance function used to compare vectors
    pub distance: Distance,
/// Custom params for HNSW index. If none - values from service configuration file are used.
pub hnsw_config: Option<HnswConfigDiff>,
/// Custom params for Optimizers. If none - values from service configuration file are used.
pub optimizers_config: Option<OptimizersConfigDiff>,
    /// Size of the vectors
    pub vector_size: i64,
/// Custom params for WAL. If none - values from service configuration file are used.
pub wal_config: Option<WalConfigDiff>,
}
///
/// Operation for creating a new collection and (optionally) specifying index params
///
#[derive(Debug, Clone, Deserialize)]
pub struct CreateCollectionOperation {
    /// Name of the collection to create
    pub collection_name: String,
    /// Distance function used to compare vectors
    pub distance: Distance,
/// Custom params for HNSW index. If none - values from service configuration file are used.
pub hnsw_config: Option<HnswConfigDiff>,
/// Custom params for Optimizers. If none - values from service configuration file are used.
pub optimizers_config: Option<OptimizersConfigDiff>,
    /// Size of the vectors
    pub vector_size: i64,
/// Custom params for WAL. If none - values from service configuration file are used.
pub wal_config: Option<WalConfigDiff>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CreateFieldIndex {
field_name: String,
}
///
/// Delete alias if exists
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeleteAlias {
/// Delete alias if exists
alias_name: String,
}
///
/// Delete alias if exists
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeleteAliasOperation {
/// Delete alias if exists
delete_alias: DeleteAlias,
}
#[derive(Debug, Clone, Deserialize)]
pub struct DeletePayload {
keys: Vec<String>,
points: Vec<ExtendedPointId>,
}
///
/// All possible payload filtering conditions
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FieldCondition {
    /// Payload key to apply the condition to
    key: String,
/// Check if points geo location lies in a given area
geo_bounding_box: Option<GeoBoundingBox>,
/// Check if geo point is within a given radius
geo_radius: Option<GeoRadius>,
/// Check if point has field with a given value
#[serde(rename = "match")]
_match: Option<Match>,
/// Check if points value lies in a given range
range: Option<Range>,
}
///
/// Create index for payload field
///
#[derive(Debug, Clone, Deserialize)]
pub struct FieldIndexOperationsOneOf {
/// Create index for payload field
create_index: String,
}
///
/// Delete index for the field
///
#[derive(Debug, Clone, Deserialize)]
pub struct FieldIndexOperationsOneOf1 {
/// Delete index for the field
delete_index: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Filter {
/// All conditions must match
pub must: Option<Vec<Condition>>,
/// All conditions must NOT match
pub must_not: Option<Vec<Condition>>,
    /// At least one of these conditions should match
pub should: Option<Vec<Condition>>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FilterSelector {
pub filter: Filter,
}
///
/// Geo filter request. Matches coordinates inside the rectangle described by the coordinates
/// of its top-left and bottom-right corners
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeoBoundingBox {
    /// Coordinates of the bottom-right corner
    pub bottom_right: GeoPoint,
    /// Coordinates of the top-left corner
    pub top_left: GeoPoint,
}
///
/// Geo point payload schema
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeoPoint {
    /// Latitude
    pub lat: f64,
    /// Longitude
    pub lon: f64,
}
///
/// Geo filter request Matches coordinates inside the circle of `radius` and center with coordinates `center`
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeoRadius {
    /// Center of the circle
    pub center: GeoPoint,
/// Radius of the area in meters
pub radius: f64,
}
///
/// ID-based filtering condition
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HasIdCondition {
    /// List of point ids to match
    pub has_id: Vec<ExtendedPointId>,
}
///
/// Config of HNSW index
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HnswConfig {
/// Number of neighbours to consider during the index building. Larger the value - more
/// accurate the search, more time required to build index.
pub ef_construct: i64,
/// Minimal amount of points for additional payload-based indexing. If payload chunk is
    /// smaller than `full_scan_threshold` additional indexing won't be used - in this case
/// full-scan search should be preferred by query planner and additional indexing is not
/// required.
pub full_scan_threshold: i64,
/// Number of edges per node in the index graph. Larger the value - more accurate the search,
/// more space required.
pub m: i64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HnswConfigDiff {
/// Number of neighbours to consider during the index building. Larger the value - more
/// accurate the search, more time required to build index.
pub ef_construct: Option<i64>,
/// Minimal amount of points for additional payload-based indexing. If payload chunk is smaller
    /// than `full_scan_threshold` additional indexing won't be used - in this case full-scan
/// search should be preferred by query planner and additional indexing is not required.
pub full_scan_threshold: Option<i64>,
/// Number of edges per node in the index graph. Larger the value - more accurate the search, more
/// space required.
pub m: Option<i64>,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum InlineResponseResult {
CollectionInfo(CollectionInfo),
Bool(bool),
CollectionsResponse(CollectionsResponse),
UpdateResult(UpdateResult),
Record(Record),
RecordList(Vec<Record>),
ScrollResult(ScrollResult),
ScoredPointList(Vec<ScoredPoint>),
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum InlineResponseStatus {
Error(ErrorResponseStatus),
Ok(OkStatus),
}
#[derive(Debug, Clone, Deserialize)]
pub struct ErrorResponseStatus {
/// Description of the occurred error.
pub error: Option<String>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct InlineResponse {
/// Time spent to process this request
pub time: Option<f64>,
pub status: InlineResponseStatus,
pub result: Option<InlineResponseResult>,
}
///
/// Match filter request
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MatchInteger {
/// Integer value to match
pub integer: i64,
}
///
/// Match by keyword
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MatchKeyword {
/// Keyword value to match
pub keyword: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct OptimizersConfig {
/// Target amount of segments optimizer will try to keep. Real amount of segments may vary
/// depending on multiple parameters: - Amount of stored points - Current write RPS It is
/// recommended to select default number of segments as a factor of the number of search
/// threads, so that each segment would be handled evenly by one of the threads
pub default_segment_number: i64,
/// The minimal fraction of deleted vectors in a segment, required to perform segment
/// optimization
pub deleted_threshold: f64,
/// Minimum interval between forced flushes.
pub flush_interval_sec: i64,
/// Maximum number of vectors allowed for plain index. Default value based on
/// https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md
pub indexing_threshold: i64,
/// Maximum available threads for optimization workers
pub max_optimization_threads: i64,
    /// Do not create segments larger than this number of points. Large segments might require
    /// disproportionately long indexation times, therefore it makes sense to limit the size of
    /// segments. If indexation speed has higher priority for you - make this parameter lower. If
    /// search speed is more important - make this parameter higher.
pub max_segment_size: i64,
/// Maximum number of vectors to store in-memory per segment. Segments larger than this
/// threshold will be stored as read-only memmaped file.
pub memmap_threshold: i64,
/// Starting from this amount of vectors per-segment the engine will start building index for
/// payload.
pub payload_indexing_threshold: i64,
/// The minimal number of vectors in a segment, required to perform segment optimization
pub vacuum_min_vector_number: i64,
}
#[derive(Debug, Clone, Deserialize)]
pub struct OptimizersConfigDiff {
/// Target amount of segments optimizer will try to keep. Real amount of segments may vary
/// depending on multiple parameters: - Amount of stored points - Current write RPS It is
/// recommended to select default number of segments as a factor of the number of search
/// threads, so that each segment would be handled evenly by one of the threads
pub default_segment_number: Option<i64>,
/// The minimal fraction of deleted vectors in a segment, required to perform segment
/// optimization
pub deleted_threshold: Option<f64>,
/// Minimum interval between forced flushes.
pub flush_interval_sec: Option<i64>,
/// Maximum number of vectors allowed for plain index. Default value based on
/// https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md
pub indexing_threshold: Option<i64>,
/// Maximum available threads for optimization workers
pub max_optimization_threads: Option<i64>,
    /// Do not create segments larger than this number of points. Large segments might require
    /// disproportionately long indexation times, therefore it makes sense to limit the size of
    /// segments. If indexation speed has higher priority for you - make this parameter lower. If
    /// search speed is more important - make this parameter higher.
pub max_segment_size: Option<i64>,
/// Maximum number of vectors to store in-memory per segment. Segments larger than this threshold will be stored as read-only memmaped file.
pub memmap_threshold: Option<i64>,
/// Starting from this amount of vectors per-segment the engine will start building index for
/// payload.
pub payload_indexing_threshold: Option<i64>,
/// The minimal number of vectors in a segment, required to perform segment optimization
pub vacuum_min_vector_number: Option<i64>,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum OptimizersStatus {
Ok(OkStatus),
Error(OptimizersStatusError),
}
///
/// Something wrong happened with optimizers
///
#[derive(Debug, Clone, Deserialize)]
pub struct OptimizersStatusError {
/// Something wrong happened with optimizers
pub error: String,
}
///
/// Set payload values, overriding any that already exist
///
#[derive(Debug, Clone, Deserialize)]
pub struct PayloadOpsOneOf {
    set_payload: SetPayload,
}
///
/// Deletes specified payload values if they are assigned
///
#[derive(Debug, Clone, Deserialize)]
pub struct PayloadOpsOneOf1 {
/// Deletes specified payload values if they are assigned
delete_payload: DeletePayload,
}
///
/// Drops all Payload values associated with given points.
///
#[derive(Debug, Clone, Deserialize)]
pub struct PayloadOpsOneOf2 {
/// Drops all Payload values associated with given points.
clear_payload: PayloadOpsOneOf2ClearPayload,
}
#[derive(Debug, Clone, Deserialize)]
pub struct PayloadOpsOneOf2ClearPayload {
points: Vec<ExtendedPointId>,
}
///
/// Clear all Payload values by given filter criteria.
///
#[derive(Debug, Clone, Deserialize)]
pub struct PayloadOpsOneOf3 {
/// Clear all Payload values by given filter criteria.
pub clear_payload_by_filter: Filter,
}
///
/// Payload field type & index information
///
#[derive(Debug, Clone, Deserialize)]
pub struct PayloadSchemaInfo {
    /// Type of the payload field
    pub data_type: PayloadSchemaType,
    /// Whether the field is indexed
    pub indexed: bool,
}
#[derive(Debug, Clone, Deserialize)]
pub enum PayloadSchemaType {
Integer(i64),
Float(f64),
Geo(GeoPoint),
}
#[derive(Debug, Clone, Deserialize)]
pub enum PayloadType {
Integer(i64),
Float(f64),
Geo(GeoPoint),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PayloadSelectorExclude {
    /// Exclude these fields from the returned payload
exclude: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PayloadSelectorInclude {
    /// Only include these payload keys
include: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PointIdsList {
points: Vec<ExtendedPointId>,
}
///
/// Delete point if exists
///
#[derive(Debug, Clone, Deserialize)]
pub struct PointOperationsOneOf1 {
/// Delete point if exists
delete_points: PointOperationsOneOf1DeletePoints,
}
#[derive(Debug, Clone, Deserialize)]
pub struct PointOperationsOneOf1DeletePoints {
ids: Vec<ExtendedPointId>,
}
///
/// Delete points by given filter criteria
///
#[derive(Debug, Clone, Deserialize)]
pub struct PointOperationsOneOf2 {
/// Delete points by given filter criteria
delete_points_by_filter: Filter,
}
#[derive(Debug, Clone, Deserialize)]
pub struct PointRequest {
/// Look for points with ids
pub ids: Vec<ExtendedPointId>,
/// Select which payload to return with the response. Default: All
pub with_payload: Option<WithPayloadInterface>,
/// Whether to return the point vector with the result?
pub with_vector: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PointStruct {
/// Id
pub id: ExtendedPointId,
/// Payload values (optional)
#[serde(skip_serializing_if = "Option::is_none")]
pub payload: Option<HashMap<String, Value>>,
/// Vector
pub vector: Vec<f64>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PointsBatch {
pub batch: Batch,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PointsList {
pub points: Vec<PointStruct>,
}
///
/// Range filter request
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Range {
    /// point.key > range.gt
    pub gt: Option<f64>,
    /// point.key >= range.gte
    pub gte: Option<f64>,
    /// point.key < range.lt
    pub lt: Option<f64>,
    /// point.key <= range.lte
    pub lte: Option<f64>,
}
///
/// Recommendation request. Provides positive and negative examples of the vectors, which are
/// already stored in the collection. Service should look for the points which are closer to
/// positive examples and at the same time further to negative examples. The concrete way of
/// how to compare negative and positive distances is up to implementation in `segment` crate.
///
#[derive(Debug, Clone, Deserialize)]
pub struct RecommendRequest {
    /// Look only for points which satisfy these conditions
filter: Option<Filter>,
/// Try to avoid vectors like this
negative: Vec<ExtendedPointId>,
/// Additional search params
params: Option<SearchParams>,
/// Look for vectors closest to those
positive: Vec<ExtendedPointId>,
    /// Max number of results to return
top: i64,
/// Select which payload to return with the response. Default: None
with_payload: Option<WithPayloadInterface>,
/// Whether to return the point vector with the result?
with_vector: Option<bool>,
}
///
/// Point data
///
#[derive(Debug, Clone, Deserialize)]
pub struct Record {
    /// Id of the point
    id: ExtendedPointId,
/// Payload - values assigned to the point
payload: Option<HashMap<String, PayloadType>>,
/// Vector of the point
vector: Option<Vec<f64>>,
}
///
/// Change alias to a new one
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RenameAlias {
    /// New alias name
    new_alias_name: String,
    /// Old alias name to replace
    old_alias_name: String,
}
///
/// Change alias to a new one
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RenameAliasOperation {
/// Change alias to a new one
rename_alias: RenameAlias,
}
///
/// Search result
///
#[derive(Debug, Clone, Deserialize)]
pub struct ScoredPoint {
    /// Id of the point
    pub id: ExtendedPointId,
/// Payload - values assigned to the point
pub payload: Option<HashMap<String, PayloadType>>,
/// Points vector distance to the query vector
pub score: f64,
/// Vector of the point
pub vector: Option<Vec<f64>>,
/// Point version
pub version: Option<i64>,
}
///
/// Scroll request - paginate over all points which matches given condition
///
#[derive(Debug, Clone, Deserialize)]
pub struct ScrollRequest {
    /// Look only for points which satisfy these conditions. If not provided - all points.
pub filter: Option<Filter>,
/// Limit
pub limit: Option<i64>,
/// Start ID to read points from.
pub offset: Option<ExtendedPointId>,
/// Select which payload to return with the response. Default: All
pub with_payload: Option<WithPayloadInterface>,
/// Whether to return the point vector with the result?
pub with_vector: Option<bool>,
}
///
/// Result of the points read request
///
#[derive(Debug, Clone, Deserialize)]
pub struct ScrollResult {
/// Offset which should be used to retrieve a next page result
pub next_page_offset: Option<ExtendedPointId>,
/// List of retrieved points
pub points: Vec<Record>,
}
///
/// Additional parameters of the search
///
#[derive(Debug, Clone, Deserialize)]
pub struct SearchParams {
    /// Params relevant to HNSW index. Size of the beam in a beam-search. Larger the value -
    /// more accurate the result, more time required for search.
    pub hnsw_ef: Option<i64>,
}
///
/// Search request. Holds all conditions and parameters for the search of most similar points
/// by vector similarity given the filtering restrictions.
///
#[derive(Debug, Clone, Deserialize)]
pub struct SearchRequest {
    /// Look only for points which satisfy these conditions
pub filter: Option<Filter>,
/// Additional search params
pub params: Option<SearchParams>,
    /// Max number of results to return
pub top: i64,
/// Look for vectors closest to this
pub vector: Vec<f64>,
/// Select which payload to return with the response. Default: None
pub with_payload: Option<WithPayloadInterface>,
/// Whether to return the point vector with the result?
pub with_vector: Option<bool>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct SetPayload {
    /// Payload values to set
    pub payload: HashMap<String, Value>,
/// Assigns payload to each point in this list
pub points: Vec<ExtendedPointId>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct StorageOperationsOneOf {
pub create_collection: CreateCollectionOperation,
}
#[derive(Debug, Clone, Deserialize)]
pub struct StorageOperationsOneOf1 {
pub update_collection: UpdateCollectionOperation,
}
#[derive(Debug, Clone, Deserialize)]
pub struct StorageOperationsOneOf2 {
/// Operation for deleting collection with given name
pub delete_collection: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct StorageOperationsOneOf3 {
pub change_aliases: ChangeAliasesOperation,
}
///
/// Operation for updating parameters of the existing collection
///
#[derive(Debug, Clone, Deserialize)]
pub struct UpdateCollection {
///
/// Custom params for Optimizers. If none - values from service configuration file are used.
/// This operation is blocking, it will only proceed once all current optimizations are
/// complete
///
pub optimizers_config: Option<OptimizersConfigDiff>,
}
///
/// Operation for updating parameters of the existing collection
///
#[derive(Debug, Clone, Deserialize)]
pub struct UpdateCollectionOperation {
    /// Name of the collection to update
    pub collection_name: String,
///
/// Custom params for Optimizers. If none - values from service configuration file are used.
    /// This operation is blocking, it will only proceed once all current optimizations are
/// complete
///
pub optimizers_config: Option<OptimizersConfigDiff>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct UpdateResult {
///
/// Sequential number of the operation
///
pub operation_id: i64,
///
/// Update status
///
pub status: UpdateStatus,
}
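
ExtendedPointId uses serde's untagged representation, so point ids travel as bare JSON numbers or strings. A quick round-trip check (hypothetical, not part of this commit):

use qdrant_rust_client::models::ExtendedPointId;

fn main() {
    // Integer ids serialize as bare JSON numbers thanks to #[serde(untagged)].
    let id: ExtendedPointId = 7.into();
    assert_eq!(serde_json::to_string(&id).unwrap(), "7");

    // String ids deserialize into the String variant.
    let id: ExtendedPointId = serde_json::from_str("\"point-a\"").unwrap();
    assert!(matches!(id, ExtendedPointId::String(_)));
}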

3
src/models/mod.rs Normal file

@@ -0,0 +1,3 @@
mod collection;
pub use collection::*;
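
The example above only inserts points with payload: None. For contrast, a payload-bearing point built from these models might look like this (hypothetical snippet, not part of this commit):

use qdrant_rust_client::models::PointStruct;
use serde_json::{json, Value};
use std::collections::HashMap;

fn point_with_payload() -> PointStruct {
    // Payloads are free-form JSON maps attached to a point.
    let mut payload: HashMap<String, Value> = HashMap::new();
    payload.insert("city".into(), json!("Berlin"));
    payload.insert("population".into(), json!(3_600_000));
    PointStruct {
        id: 9.into(),
        payload: Some(payload),
        vector: vec![0.0; 8], // must match the collection's vector_size
    }
}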