# Configuration
# How to set the configuration for your server
# The following line defines the API key to access the server end-points.
api_key = "mykey"
################################### Data Security Secret ####################################
# The following line defines a secret used to protect your data at rest and ensures
# there is no correlatability between your own data and someone else's who is using this
# product. Even if your database layer is compromised, protecting this secret will thwart
# many forms of attack vectors. This secret cannot be changed without re-enrolling all
# your data again. For secret rotation, it is suggested you re-enroll your entire data
# from time to time and point to a new instance while retiring the instance with the old
# secret.
#
# IMPORTANT NOTE: person_id and person_name are stored in plain text and are searchable.
# For full privacy, it is suggested that you use UUIDs for person_id and skip storing
# the name where it is not necessary. This is a trade-off between the convenience of
# search and the privacy of non-correlatability that you can consider to suit your
# use-case. It is also suggested that you use UUIDs for collection names while
# maintaining a separate database that maps those UUIDs to meaningful groups.
#
# This product stores Renewable Biometric References as per the below standard:
# Biometric information protection - ISO/IEC 24745:2022:
# https://www.iso.org/standard/75302.html
#
# The secret that you are setting below corresponds to Auxiliary Data as per the
# standard.
data_security_secret = "changethis"
# Change the following line to define another interface for the server to listen on.
ip = "0.0.0.0"
# Max request size in MB. Change this to restrict request size.
max_payload_size = 5
# Uncomment one of the following lines to define the database to use.
# The default is "database/opencv-fr-database" (this uses the local file system).
# Local file system:
conn_str = "database/opencv-fr-database-new"
# AWS S3:
#conn_str = "s3://bucket/path"
# Uncomment and set the following lines if you are using AWS S3.
# aws_access_key_id = "my-access-key"
# aws_secret_access_key = "my-secret-key"
# aws_session_token = "my-session-token"
############################### Read consistency interval ###################################
# The interval at which to check for db updates from other instances. If more than this
# duration has passed since the last read, the read will check for updates from other
# instances.
# 0s (Strong consistency): The database checks for updates on every read.
# This provides the strongest consistency guarantees, ensuring that all instances see the
# latest committed data. However, it has the most overhead. This setting is suitable when
# consistency matters more than having high QPS.
# ns (Eventual consistency): The database checks for updates every n seconds. This
# provides eventual consistency, allowing for some lag between write and read operations.
# Changes made on one instance may not be immediately visible on another instance. This
# setting is suitable when having high QPS is more important than consistency.
# The default is "10s" which allows for new data inserted on one instance to be visible
# on other instances at most 10 seconds later. Change this value to suit your needs to
# balance consistency and performance.
conn_read_consistency_interval = "10s"
################################## Index retrieval tuning ###################################
# Indexing is recommended only when your collection is larger than 1,000,000 persons.
# When using an index, face search can miss some items. Having no index means the search
# will be slower but it will also be exhaustive and not miss any items. If you have more
# than a million items in a collection, you can create an index. When using an index,
# tuning the following will allow you to refine your search results according to your
# dataset distribution.
# The number of probes determines the distribution of vector space. While a higher number
# enhances search accuracy, it also results in slower performance. Typically, setting
# nprobes to cover 5–10% of the dataset proves effective in achieving high recall with
# minimal latency.
conn_n_probes_percentage = 10
# Refine the results by reading extra elements and re-ranking them in memory. A higher
# number makes the search more accurate but also slower. Please see:
# https://lancedb.github.io/lancedb/faq/#how-can-i-speed-up-data-inserts
conn_refine_factor = 50
# The time to wait (in seconds) before allowing another optimize or indexing job
# if one is currently running on a particular collection.
conn_job_wait_time_sec = 3600

