Atoma node deployment
Deploying an Atoma node
# Fetch the node software and create a local environment file
# from the provided template (edit .env afterwards to fill in values).
git clone https://github.com/atoma-network/atoma-node.git
cd atoma-node
cp .env.example .env

# Hugging Face Configuration
HF_CACHE_PATH=~/.cache/huggingface # Local cache directory for downloaded models
HF_TOKEN= # Required for gated models (e.g. Llama); create at huggingface.co/settings/tokens
# Inference Server Configuration
INFERENCE_SERVER_PORT=50000 # External port for vLLM service
MODEL=meta-llama/Llama-3.1-70B-Instruct # Hugging Face model ID served by vLLM
MAX_MODEL_LEN=4096 # Context length
GPU_COUNT=1 # Number of GPUs to use
TENSOR_PARALLEL_SIZE=1 # Should be equal to GPU_COUNT
# Sui Configuration
SUI_CONFIG_PATH=~/.sui/sui_config # Path to the Sui client configuration directory
# Atoma Node Service Configuration
ATOMA_SERVICE_PORT=3000 # External port for Atoma service