Integrates a custom LLM server ("LLMServer") into AI Engine as a new engine
<?php
/*
Plugin Name: LLMServer Integration
Plugin URI: https://example.com
Description: Integrates a custom LLM server ("LLMServer") into AI Engine as a new engine, similar to how Ollama is integrated.
Version: 1.0.0
Author: Your Name
Author URI: https://example.com
License: GPLv2 or later
Text Domain: llmserver-integration
*/
/**
 * HOW THIS PLUGIN WORKS:
 *
 * 1. We hook into two AI Engine filters:
 *    - mwai_engines: We add our new engine ("LLMServer") to the array of available engines.
 *    - mwai_init_engine: When AI Engine attempts to initialize an engine, we detect if
 *      the engine type is "llmserver". If so, we instantiate our custom My_LLMServer_Engine class.
 *
 * 2. My_LLMServer_Engine extends Meow_MWAI_Engines_OpenAI so we can reuse much of AI Engine's
 *    existing logic for OpenAI-like requests (endpoints, body structure, etc.).
 *
 * 3. In practice, we set $this->endpoint and $this->apiKey based on the plugin settings.
 *    We override methods like build_url(), build_headers(), and build_body() to adapt
 *    them to our custom LLM server.
 *
 * 4. With AI Engine installed, activating this plugin lets you choose "LLMServer"
 *    in AI Engine's settings. You can then specify your server's endpoint and API key.
 *
 * HOW TO USE:
 * 1. Place this file in your wp-content/plugins/ directory.
 * 2. Activate this plugin from your WordPress admin.
 * 3. Go to AI Engine → Settings → Engines and select "LLMServer".
 * 4. Configure the endpoint and API key fields as needed.
 * 5. Test by sending prompts via AI Engine's Chatbot or Playground, which will now call your LLM server.
 *
 * NOTE: This example is minimal. Adjust the build_url(), build_headers(), and build_body() methods
 * as needed for your custom server's API.
 */
/**
 * We define our "core" integration class that attaches to the filters and
 * instantiates the LLMServer engine when requested.
 */
if ( !class_exists( 'My_LLMServer_Integration' ) ) {
  class My_LLMServer_Integration {

    public function __construct() {
      /**
       * Filter #1: mwai_engines
       * - We add our engine's definition to the existing array.
       */
      add_filter( 'mwai_engines', [ $this, 'add_engine' ], 10, 1 );
      /**
       * Filter #2: mwai_init_engine
       * - We check if the requested engine type is "llmserver" and instantiate our class if so.
       */
      add_filter( 'mwai_init_engine', [ $this, 'init_engine' ], 10, 3 );
    }

    /**
     * Adds "LLMServer" to the list of engines recognized by AI Engine.
     *
     * @param array $engines The current list of engine definitions.
     * @return array Updated list with our new engine appended.
     */
    public function add_engine( $engines ) {
      // AI Engine will automatically add this engine to the list of available engines.
      // The supported inputs are endpoint, apiKey, and dynamicModels. If you include them
      // in 'inputs', they will automatically appear as fields in the settings. Remove any you don't need.
      $engines[] = [
        'name' => 'LLMServer', // Display name in the UI
        'type' => 'llmserver', // Internal unique slug
        'inputs' => [ 'endpoint', 'apiKey', 'dynamicModels' ],
      ];
      return $engines;
    }

    /**
     * Called by AI Engine during initialization of an engine.
     * If the selected type is 'llmserver', we create a new My_LLMServer_Engine instance.
     *
     * @param object|null $engine The current engine object or null.
     * @param object $core The AI Engine core instance.
     * @param array $env Array containing engine type, endpoint, etc.
     * @return object The instantiated My_LLMServer_Engine or the existing $engine if type doesn't match.
     */
    public function init_engine( $engine, $core, $env ) {
      // Check if the user selected our engine type.
      if ( isset( $env['type'] ) && $env['type'] === 'llmserver' ) {
        // Instantiate our custom engine class defined below.
        $engine = new My_LLMServer_Engine( $core, $env );
      }
      return $engine;
    }
  }
}
/**
 * This class extends Meow_MWAI_Engines_OpenAI.
 * AI Engine relies on certain OpenAI-like methods, so we reuse them.
 */
if ( !class_exists( 'My_LLMServer_Engine' ) && class_exists( 'Meow_MWAI_Engines_OpenAI' ) ) {
  class My_LLMServer_Engine extends Meow_MWAI_Engines_OpenAI {

    /**
     * Constructor simply calls the parent constructor,
     * which expects the AI Engine core object and an $env array with settings.
     */
    public function __construct( $core, $env ) {
      parent::__construct( $core, $env );
    }

    /**
     * Set environment variables like endpoint and API key
     * from the user-provided settings (in $this->env).
     */
    protected function set_environment() {
      // By default, the plugin sets $env['endpoint'] and $env['apiKey'] if the user configured them.
      $this->endpoint = !empty( $this->env['endpoint'] ) ? $this->env['endpoint'] : 'https://default-llmserver.com/api';
      $this->apiKey = !empty( $this->env['apiKey'] ) ? $this->env['apiKey'] : '';
    }

    /**
     * Build the URL for your LLMServer's completion or chat endpoint.
     *
     * @param object $query The AI Engine query object (contains user prompt, etc.).
     * @param string $endpoint Optionally override the endpoint.
     * @return string The full URL to call.
     */
    protected function build_url( $query, $endpoint = null ) {
      // Allow advanced users to adjust the endpoint via a filter.
      $endpoint = apply_filters( 'mwai_llmserver_endpoint', $this->endpoint, $this->env );
      // Example: Suppose your server listens at /v1/chat for chat completions.
      $url = rtrim( $endpoint, '/' ) . '/v1/chat';
      return $url;
    }
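
    /**
     * EXAMPLE (hypothetical): another plugin or a theme's functions.php could
     * reroute requests through the 'mwai_llmserver_endpoint' filter above,
     * e.g. to point at a staging server. A minimal sketch — the staging URL
     * below is an assumption, not part of this plugin:
     *
     *   add_filter( 'mwai_llmserver_endpoint', function ( $endpoint, $env ) {
     *     return 'https://staging-llmserver.example.com/api';
     *   }, 10, 2 );
     */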
    /**
     * Build the headers for the request.
     * For instance, if your LLMServer needs an Authorization header.
     */
    protected function build_headers( $query ) {
      // Start with the parent's default headers.
      $headers = parent::build_headers( $query );
      // Add your custom authorization if needed.
      // For example: "Authorization: Bearer <API Key>"
      if ( !empty( $this->apiKey ) ) {
        $headers['Authorization'] = 'Bearer ' . $this->apiKey;
      }
      return $headers;
    }
    /**
     * Build the request body (JSON payload) to send to your LLMServer.
     * You can rename or tweak fields to match your server's API spec.
     */
    protected function build_body( $query, $streamCallback = null, $extra = null ) {
      // The parent method constructs an array with typical keys:
      // - 'prompt', 'max_tokens', etc.
      $body = parent::build_body( $query, $streamCallback, $extra );
      // If your server requires differently named fields, you can rename them here, e.g.:
      // $body['my_custom_prompt_key'] = $body['prompt'];
      // unset( $body['prompt'] );
      return $body;
    }
    /**
     * Return a name for logging/monitoring usage in AI Engine.
     */
    protected function get_service_name() {
      return "LLMServer";
    }

    /**
     * If your service has multiple models, you can retrieve them dynamically
     * so that AI Engine can display them in a dropdown.
     *
     * For example, call https://your-llmserver.com/v1/models to list them.
     */
    public function retrieve_models() {
      // A minimal example returning a static array of models:
      // Each model can have additional properties (e.g. max tokens, type, etc.)
      return [
        [
          'model' => 'llmserver-base',
          'name' => 'LLMServer Base Model',
          'family' => 'Base',
          'features' => [ 'completion', 'chat' ],
          'tags' => [ 'chat' ],
          'price' => [
            'in' => 0,
            'out' => 0
          ],
          'type' => 'token',
          'unit' => 1 / 1000,
          'maxCompletionTokens' => 2048,
          'maxContextualTokens' => 4096
        ]
      ];
    }
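
    /**
     * OPTIONAL (hypothetical sketch): fetch the model list from the server
     * instead of hardcoding it. This assumes your server exposes
     * GET {endpoint}/v1/models and returns JSON like:
     *   { "data": [ { "id": "llmserver-base" }, ... ] }
     * The helper name and the response shape are assumptions — adapt them to
     * your actual API, then call this from retrieve_models().
     */
    protected function fetch_remote_models() {
      $url = rtrim( $this->endpoint, '/' ) . '/v1/models';
      $response = wp_remote_get( $url, [
        'timeout' => 15,
        'headers' => !empty( $this->apiKey ) ? [ 'Authorization' => 'Bearer ' . $this->apiKey ] : [],
      ] );
      if ( is_wp_error( $response ) ) {
        return [];
      }
      $data = json_decode( wp_remote_retrieve_body( $response ), true );
      if ( empty( $data['data'] ) || !is_array( $data['data'] ) ) {
        return [];
      }
      $models = [];
      foreach ( $data['data'] as $remoteModel ) {
        // Reuse the same structure AI Engine expects (see retrieve_models above).
        $models[] = [
          'model' => $remoteModel['id'],
          'name' => $remoteModel['id'],
          'family' => 'Base',
          'features' => [ 'completion', 'chat' ],
          'tags' => [ 'chat' ],
          'price' => [ 'in' => 0, 'out' => 0 ],
          'type' => 'token',
          'unit' => 1 / 1000,
          'maxCompletionTokens' => 2048,
          'maxContextualTokens' => 4096
        ];
      }
      return $models;
    }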
  }
}

/**
 * Finally, we instantiate our main integration class, so it hooks into AI Engine.
 * (Assuming AI Engine is installed and active.)
 */
new My_LLMServer_Integration();
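
/**
 * OPTIONAL NOTE (hypothetical): if this file is parsed before AI Engine loads,
 * the class_exists( 'Meow_MWAI_Engines_OpenAI' ) guard above will be false and
 * My_LLMServer_Engine will never be defined. A sketch of one workaround —
 * assuming this code lives in its own file named llmserver-integration.php,
 * which is an assumption — is to defer loading it from a small loader file
 * until all plugins are available:
 *
 *   add_action( 'plugins_loaded', function () {
 *     if ( class_exists( 'Meow_MWAI_Engines_OpenAI' ) ) {
 *       require_once __DIR__ . '/llmserver-integration.php';
 *     }
 *   } );
 */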