@@ -207,6 +207,216 @@ public class DeepSeekProvider: AIProvider {
207207 }
208208}
209209
// MARK: - MiniMax Provider

/// Provider for MiniMax AI API.
/// MiniMax uses OpenAI-compatible API format at https://api.minimax.io/v1
@MainActor
public class MiniMaxProvider: AIProvider {
    public let identifier: String
    public let config: ProviderConfiguration
    private let logger = Logger(label: "com.sam.api.minimax")

    /// Creates a provider bound to the given configuration.
    /// - Parameter config: Supplies the provider id, API key, base URL,
    ///   model list, and request tuning values used by every call.
    public init(config: ProviderConfiguration) {
        self.identifier = config.providerId
        self.config = config
        logger.debug("MiniMax Provider initialized")
    }

    /// Simulates streaming by performing one non-streaming completion and
    /// replaying it word-by-word as OpenAI-style stream chunks.
    /// - Returns: A stream of chunks: a role chunk, one chunk per word,
    ///   then a terminal chunk with `finishReason: "stop"`.
    /// - Throws: Any error from `processChatCompletion(_:)` is surfaced
    ///   through the stream's failure.
    public func processStreamingChatCompletion(_ request: OpenAIChatRequest) async throws -> AsyncThrowingStream<ServerOpenAIChatStreamChunk, Error> {
        return AsyncThrowingStream { continuation in
            let producer = Task {
                do {
                    // Check cancellation before issuing the HTTP request.
                    if Task.isCancelled {
                        self.logger.debug("TASK_CANCELLED: MiniMax request cancelled before start")
                        continuation.finish()
                        return
                    }

                    let response = try await self.processChatCompletion(request)
                    let chunks = self.convertToStreamChunks(response)

                    for chunk in chunks {
                        // Check cancellation between chunks so a cancel stops promptly.
                        if Task.isCancelled {
                            self.logger.debug("TASK_CANCELLED: MiniMax streaming cancelled")
                            continuation.finish()
                            return
                        }

                        continuation.yield(chunk)
                        // Pace the simulated stream at ~20 chunks/second.
                        try await Task.sleep(nanoseconds: 50_000_000)
                    }

                    continuation.finish()
                } catch is CancellationError {
                    // Task.sleep throws CancellationError when cancelled mid-stream;
                    // treat it like the explicit isCancelled checks above and finish
                    // cleanly instead of surfacing it as a stream failure.
                    self.logger.debug("TASK_CANCELLED: MiniMax streaming cancelled")
                    continuation.finish()
                } catch {
                    continuation.finish(throwing: error)
                }
            }

            // Propagate consumer-side termination (the caller stops iterating or
            // cancels) to the producer task so it does not keep sleeping/yielding
            // in the background after nobody is listening.
            continuation.onTermination = { _ in
                producer.cancel()
            }
        }
    }

    /// Splits a complete chat response into simulated stream chunks:
    /// an initial assistant-role delta, one delta per whitespace-separated
    /// word (each with a trailing space), and a final `"stop"` chunk.
    /// NOTE: original whitespace is not preserved — newlines and runs of
    /// spaces in the content collapse to single spaces in the replayed text.
    private func convertToStreamChunks(_ response: ServerOpenAIChatResponse) -> [ServerOpenAIChatStreamChunk] {
        guard let choice = response.choices.first else { return [] }
        var chunks: [ServerOpenAIChatStreamChunk] = []

        // Opening chunk carries only the assistant role, mirroring OpenAI's format.
        chunks.append(ServerOpenAIChatStreamChunk(
            id: response.id,
            object: "chat.completion.chunk",
            created: response.created,
            model: response.model,
            choices: [OpenAIChatStreamChoice(index: 0, delta: OpenAIChatDelta(role: "assistant", content: nil))]
        ))

        let words = (choice.message.content ?? "").components(separatedBy: .whitespacesAndNewlines).filter { !$0.isEmpty }
        for word in words {
            chunks.append(ServerOpenAIChatStreamChunk(
                id: response.id,
                object: "chat.completion.chunk",
                created: response.created,
                model: response.model,
                choices: [OpenAIChatStreamChoice(index: 0, delta: OpenAIChatDelta(content: word + " "))]
            ))
        }

        // Terminal chunk: empty delta with the finish reason.
        chunks.append(ServerOpenAIChatStreamChunk(
            id: response.id,
            object: "chat.completion.chunk",
            created: response.created,
            model: response.model,
            choices: [OpenAIChatStreamChoice(index: 0, delta: OpenAIChatDelta(), finishReason: "stop")]
        ))

        return chunks
    }

    /// Performs a single non-streaming chat completion against the MiniMax API.
    /// - Parameter request: The OpenAI-format request to forward.
    /// - Returns: The decoded OpenAI-compatible response.
    /// - Throws: `ProviderError.authenticationFailed` when no API key is set,
    ///   `ProviderError.invalidConfiguration` for a missing/invalid base URL,
    ///   `ProviderError.networkError` for serialization, transport, or
    ///   non-2xx HTTP failures.
    public func processChatCompletion(_ request: OpenAIChatRequest) async throws -> ServerOpenAIChatResponse {
        let requestId = UUID().uuidString
        logger.debug("Processing chat completion via MiniMax API [req: \(requestId.prefix(8))]")

        guard let apiKey = config.apiKey else {
            throw ProviderError.authenticationFailed("MiniMax API key not configured")
        }

        guard let baseURL = config.baseURL ?? ProviderType.minimax.defaultBaseURL else {
            throw ProviderError.invalidConfiguration("MiniMax base URL not configured")
        }

        /// MiniMax uses OpenAI-compatible API format.
        // Tolerate a trailing slash in the configured base URL so we do not
        // build a malformed ".../v1//chat/completions" path.
        let normalizedBase = baseURL.hasSuffix("/") ? String(baseURL.dropLast()) : baseURL
        guard let url = URL(string: "\(normalizedBase)/chat/completions") else {
            throw ProviderError.invalidConfiguration("Invalid MiniMax base URL: \(baseURL)")
        }

        var urlRequest = URLRequest(url: url)
        urlRequest.httpMethod = "POST"
        urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type")
        urlRequest.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")

        /// MiniMax has specific temperature range: 0.01 to 1.0
        let temperature = request.temperature ?? config.temperature ?? 0.7
        let clampedTemperature = min(max(temperature, 0.01), 1.0)

        /// Create request body.
        let requestBody: [String: Any] = [
            "model": request.model,
            "messages": request.messages.map { message in
                [
                    "role": message.role,
                    "content": message.content
                ]
            },
            "max_tokens": request.maxTokens ?? config.maxTokens ?? 8192,
            "temperature": clampedTemperature,
            "stream": false
        ]

        do {
            urlRequest.httpBody = try JSONSerialization.data(withJSONObject: requestBody)
        } catch {
            throw ProviderError.networkError("Failed to serialize request: \(error.localizedDescription)")
        }

        /// Set timeout (5 minutes minimum for tool-enabled requests).
        /// Even if config specifies lower timeout, enforce 300s minimum to prevent timeouts.
        let configuredTimeout = TimeInterval(config.timeoutSeconds ?? 300)
        urlRequest.timeoutInterval = max(configuredTimeout, 300)

        logger.debug("Sending request to MiniMax API [req: \(requestId.prefix(8))]")

        do {
            let (data, response) = try await URLSession.shared.data(for: urlRequest)

            guard let httpResponse = response as? HTTPURLResponse else {
                throw ProviderError.networkError("Invalid response type")
            }

            logger.debug("MiniMax API response [req: \(requestId.prefix(8))]: \(httpResponse.statusCode)")

            guard 200...299 ~= httpResponse.statusCode else {
                // Log the raw error body to aid debugging; it is not returned to callers.
                if let errorData = String(data: data, encoding: .utf8) {
                    logger.error("MiniMax API error [req: \(requestId.prefix(8))]: \(errorData)")
                }
                throw ProviderError.networkError("MiniMax API returned status \(httpResponse.statusCode)")
            }

            /// Parse response.
            let minimaxResponse = try JSONDecoder().decode(ServerOpenAIChatResponse.self, from: data)
            logger.debug("Successfully processed MiniMax response [req: \(requestId.prefix(8))]")

            return minimaxResponse

        } catch let error as ProviderError {
            // Preserve already-classified provider errors instead of re-wrapping them.
            throw error
        } catch {
            logger.error("MiniMax API request failed [req: \(requestId.prefix(8))]: \(error)")
            throw ProviderError.networkError("Network error: \(error.localizedDescription)")
        }
    }

    /// Returns the statically configured model list (no network call is made).
    public func getAvailableModels() async throws -> ServerOpenAIModelsResponse {
        let models = config.models.map { modelId in
            ServerOpenAIModel(
                id: modelId,
                object: "model",
                created: Int(Date().timeIntervalSince1970),
                ownedBy: "minimax"
            )
        }

        return ServerOpenAIModelsResponse(
            object: "list",
            data: models
        )
    }

    /// A model is supported if it is in the configured list or follows the
    /// "MiniMax-" naming prefix.
    public func supportsModel(_ model: String) -> Bool {
        return config.models.contains(model) || model.hasPrefix("MiniMax-")
    }

    /// Validates that an API key is present and non-empty.
    /// - Throws: `ProviderError.authenticationFailed` when the key is missing or empty.
    public func validateConfiguration() async throws -> Bool {
        guard let apiKey = config.apiKey, !apiKey.isEmpty else {
            throw ProviderError.authenticationFailed("MiniMax API key is required")
        }

        return true
    }

    // MARK: - Lifecycle

    /// Remote providers have no local model to load.
    public func loadModel() async throws -> ModelCapabilities {
        throw ProviderError.invalidRequest("loadModel() not supported for remote providers")
    }

    /// Remote providers never report a locally loaded model.
    public func getLoadedStatus() async -> Bool {
        return false
    }

    public func unload() async {
        /// No-op for remote providers.
    }
}
419+
210420// MARK: - Supporting Types
211421
212422public struct ModelStatus {
0 commit comments