Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions backend/app/rag/llms/provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ class LLMProvider(str, enum.Enum):
OLLAMA = "ollama"
GITEEAI = "giteeai"
AZURE_OPENAI = "azure_openai"
MINIMAX = "minimax"


class LLMProviderOption(BaseModel):
Expand Down Expand Up @@ -167,4 +168,26 @@ class LLMProviderOption(BaseModel):
credentials_type="str",
default_credentials="****",
),
LLMProviderOption(
provider=LLMProvider.MINIMAX,
provider_display_name="MiniMax",
provider_description="MiniMax is a leading AI technology company offering powerful large language models with OpenAI-compatible APIs, featuring MiniMax-M2.7 for complex reasoning tasks.",
provider_url="https://platform.minimax.io",
default_llm_model="MiniMax-M2.7",
llm_model_description="Available models: MiniMax-M2.7 (peak performance), MiniMax-M2.7-highspeed (faster). See https://platform.minimax.io/docs/guides/pricing-paygo",
default_config={
"api_base": "https://api.minimax.io/v1",
"is_chat_model": True,
"context_window": 204800,
},
config_description=(
"`api_base` is the API base URL of MiniMax, default is `https://api.minimax.io/v1`; "
"`is_chat_model` indicates whether the model is chat model; "
"`context_window` is the maximum number of input tokens and output tokens; "
),
credentials_display_name="MiniMax API Key",
credentials_description="The API key of MiniMax, you can get it from https://platform.minimax.io",
credentials_type="str",
default_credentials="eyJ****",
),
]
15 changes: 15 additions & 0 deletions backend/app/rag/llms/resolver.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,21 @@ def resolve_llm(
api_key=credentials,
**config,
)
case LLMProvider.MINIMAX:
from llama_index.llms.openai_like import OpenAILike

config.setdefault("is_chat_model", True)
config.setdefault("context_window", 204800)
temperature = config.pop("temperature", None)
if temperature is not None and temperature <= 0:
temperature = 1.0
return OpenAILike(
model=model,
api_base=config.pop("api_base", "https://api.minimax.io/v1"),
api_key=credentials,
temperature=temperature if temperature is not None else 1.0,
**config,
)
case LLMProvider.AZURE_OPENAI:
from llama_index.llms.azure_openai import AzureOpenAI

Expand Down
292 changes: 292 additions & 0 deletions backend/tests/test_minimax.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,292 @@
"""Unit and integration tests for the MiniMax LLM provider."""

import os
from logging import getLogger

import pytest

logger = getLogger(__name__)


# ---------------------------------------------------------------------------
# Unit Tests — Provider Enum & Options (no heavy app deps required)
# ---------------------------------------------------------------------------


class TestMiniMaxProviderEnum:
    """Tests for the MiniMax provider enum registration and option metadata."""

    @staticmethod
    def _minimax_option():
        """Return the MiniMax entry from llm_provider_options.

        Centralizes the import + lookup that every test below needs, so the
        selection logic lives in exactly one place.
        """
        from app.rag.llms.provider import LLMProvider, llm_provider_options

        return next(
            opt for opt in llm_provider_options if opt.provider == LLMProvider.MINIMAX
        )

    def test_minimax_in_provider_enum(self):
        # The enum member must exist and serialize to the config string "minimax".
        from app.rag.llms.provider import LLMProvider

        assert hasattr(LLMProvider, "MINIMAX")
        assert LLMProvider.MINIMAX.value == "minimax"

    def test_minimax_in_provider_options(self):
        # Guards against accidental duplicate registration of the provider.
        from app.rag.llms.provider import LLMProvider, llm_provider_options

        minimax_options = [
            opt for opt in llm_provider_options if opt.provider == LLMProvider.MINIMAX
        ]
        assert len(minimax_options) == 1, (
            "MiniMax should appear exactly once in provider options"
        )

    def test_minimax_provider_option_fields(self):
        minimax_opt = self._minimax_option()
        assert minimax_opt.provider_display_name == "MiniMax"
        assert minimax_opt.default_llm_model == "MiniMax-M2.7"
        assert minimax_opt.credentials_type == "str"
        assert "platform.minimax.io" in minimax_opt.provider_url

    def test_minimax_default_config(self):
        # These defaults must match what resolve_llm() falls back to.
        config = self._minimax_option().default_config
        assert config["api_base"] == "https://api.minimax.io/v1"
        assert config["is_chat_model"] is True
        assert config["context_window"] == 204800

    def test_minimax_provider_description(self):
        minimax_opt = self._minimax_option()
        assert "MiniMax" in minimax_opt.provider_description
        assert minimax_opt.provider_url == "https://platform.minimax.io"

    def test_minimax_credentials_info(self):
        minimax_opt = self._minimax_option()
        assert minimax_opt.credentials_display_name == "MiniMax API Key"
        assert "platform.minimax.io" in minimax_opt.credentials_description

    def test_minimax_model_description(self):
        # Both advertised model names should be mentioned in the description.
        minimax_opt = self._minimax_option()
        assert "MiniMax-M2.7" in minimax_opt.llm_model_description
        assert "MiniMax-M2.7-highspeed" in minimax_opt.llm_model_description


class TestMiniMaxOpenAILikeInstantiation:
    """Test MiniMax provider resolves correctly via OpenAILike.

    These tests directly instantiate OpenAILike without importing the full
    app stack, simulating what resolve_llm() does for the MINIMAX provider.
    """

    @staticmethod
    def _make_llm(**overrides):
        """Build an OpenAILike with the MiniMax defaults used by resolve_llm().

        Keyword arguments override the defaults (e.g. ``model=...`` or
        ``temperature=...``), so each test states only what it varies.
        """
        from llama_index.llms.openai_like import OpenAILike

        params = {
            "model": "MiniMax-M2.7",
            "api_base": "https://api.minimax.io/v1",
            "api_key": "test-api-key",
            "is_chat_model": True,
            "context_window": 204800,
            "temperature": 1.0,
        }
        params.update(overrides)
        return OpenAILike(**params)

    def test_creates_openai_like_with_minimax_config(self):
        llm = self._make_llm()
        assert llm.class_name() == "OpenAILike"
        assert llm.model == "MiniMax-M2.7"
        assert llm.api_key == "test-api-key"
        assert llm.api_base == "https://api.minimax.io/v1"

    def test_highspeed_model(self):
        llm = self._make_llm(model="MiniMax-M2.7-highspeed")
        assert llm.model == "MiniMax-M2.7-highspeed"

    def test_default_temperature_is_one(self):
        llm = self._make_llm()
        assert llm.temperature == 1.0

    def test_custom_temperature(self):
        llm = self._make_llm(temperature=0.7)
        assert llm.temperature == 0.7

    def test_custom_api_base_for_cn(self):
        # The mainland-China endpoint uses a different domain (minimaxi.com).
        llm = self._make_llm(api_base="https://api.minimaxi.com/v1")
        assert llm.api_base == "https://api.minimaxi.com/v1"

    def test_is_chat_model(self):
        llm = self._make_llm()
        assert llm.is_chat_model is True

    def test_context_window(self):
        llm = self._make_llm()
        assert llm.context_window == 204800

    def test_temperature_clamping_logic(self):
        """MiniMax requires temperature > 0. Verify the clamping logic
        used in resolve_llm()."""
        temperature = 0
        if temperature <= 0:
            temperature = 1.0
        assert temperature == 1.0

        temperature = 0.5
        if temperature <= 0:
            temperature = 1.0
        assert temperature == 0.5

    def test_dspy_conversion_from_openai_like(self):
        """MiniMax LLM uses OpenAILike which maps to dspy 'OpenAILike' case."""
        import dspy

        llm = self._make_llm()
        assert llm.class_name() == "OpenAILike"

        # Simulate what dspy.py does for OpenAILike: litellm routes via the
        # "openai/" prefix, and the api_base needs a trailing slash.
        lm = dspy.LM(
            model=f"openai/{llm.model}",
            max_tokens=llm.max_tokens,
            api_key=llm.api_key,
            api_base=llm.api_base + "/",
            model_type="chat" if llm.is_chat_model else "text",
            num_retries=3,
        )
        assert lm is not None
        assert "MiniMax-M2.7" in lm.model


# ---------------------------------------------------------------------------
# Integration Tests (require MINIMAX_API_KEY)
# ---------------------------------------------------------------------------


MINIMAX_API_KEY = os.getenv("MINIMAX_API_KEY")


@pytest.mark.skipif(not MINIMAX_API_KEY, reason="MINIMAX_API_KEY not set")
class TestMiniMaxIntegration:
    """Integration tests that call the real MiniMax API via OpenAILike."""

    @staticmethod
    def _make_llm(model="MiniMax-M2.7"):
        """Build an OpenAILike pointed at the live MiniMax endpoint.

        Mirrors the config resolve_llm() produces for the MINIMAX provider;
        only the model name varies between tests.
        """
        from llama_index.llms.openai_like import OpenAILike

        return OpenAILike(
            model=model,
            api_base="https://api.minimax.io/v1",
            api_key=MINIMAX_API_KEY,
            is_chat_model=True,
            context_window=204800,
            temperature=1.0,
        )

    def test_minimax_basic_chat(self):
        from llama_index.core import PromptTemplate

        llm = self._make_llm()
        prompt = PromptTemplate("What is 2 + 2? Reply with just the number.")
        output = llm.predict(prompt)
        assert "4" in output
        logger.info(f"MiniMax basic chat response: {output}")

    def test_minimax_highspeed_chat(self):
        from llama_index.core import PromptTemplate

        llm = self._make_llm(model="MiniMax-M2.7-highspeed")
        prompt = PromptTemplate("Say 'hello' in one word.")
        output = llm.predict(prompt)
        assert len(output) > 0
        logger.info(f"MiniMax highspeed response: {output}")

    def test_minimax_streaming(self):
        from llama_index.core.llms import ChatMessage

        llm = self._make_llm()
        messages = [ChatMessage(role="user", content="Count from 1 to 5.")]
        chunks = []
        for chunk in llm.stream_chat(messages):
            chunks.append(chunk)
        assert len(chunks) > 0
        # Streamed chunks are cumulative; the last one holds the full message.
        full_response = chunks[-1].message.content
        assert any(str(i) in full_response for i in range(1, 6))
        logger.info(f"MiniMax streaming response: {full_response}")
10 changes: 10 additions & 0 deletions docs/src/content/llm.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -185,3 +185,13 @@ Default config:
```

To learn more about DeepSeek, please visit [DeepSeek](https://www.deepseek.com/).

### MiniMax

MiniMax is a leading AI technology company offering powerful large language models. AutoFlow supports MiniMax as a first-class LLM provider with OpenAI-compatible APIs.

Available models: `MiniMax-M2.7` (peak performance), `MiniMax-M2.7-highspeed` (faster and more agile).

To get an API key, visit [MiniMax Platform](https://platform.minimax.io).

For more information, see the [MiniMax API documentation](https://platform.minimax.io/docs/api-reference/text-openai-api).