Changeset 3471585
Timestamp: 02/28/2026 11:31:48 AM (4 weeks ago)
Location: technodrome-ai-content-assistant/trunk
Files: 8 edited
CHANGELOG.md (modified) (1 diff)
-
dashboard/modules/history-tab/history.php (modified) (1 diff)
-
features/footer/generate-button.js (modified) (1 diff)
-
features/history-tab/history-load-more.js (modified) (1 diff)
-
includes/class-ai-providers.php (modified) (18 diffs)
-
includes/class-ajax-handler.php (modified) (1 diff)
-
readme.txt (modified) (2 diffs)
-
technodrome-ai-content-assistant.php (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
-
technodrome-ai-content-assistant/trunk/CHANGELOG.md
r3471565 r3471585 5 5 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 6 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 8 ## [4.1.2] - 2026-02-28 9 10 ### Fixed 11 - **AI Content Truncation**: Articles no longer cut off mid-sentence/mid-word for non-English languages (Serbian, Croatian, Bosnian, etc.). Root cause: non-English text uses 2–2.5 tokens per word vs ~1.3 for English, plus HTML tag overhead — the `word_count * 3` formula was too low. 12 13 ### Improved 14 - **`max_tokens` Multiplier**: Increased from `word_count × 3` to `word_count × 5` across all 9 AI providers — provides enough headroom for any language and HTML markup: 15 - Short (400 words): 1,200 → 2,000 tokens 16 - Medium (650 words): 1,950 → 3,250 tokens 17 - Long (1,000 words): 3,000 → 5,000 tokens 18 - Extended (1,500 words): 4,500 → 7,500 tokens 19 - **API Request Timeout**: Increased from 60s to 120s for all generation calls — prevents timeouts for longer content with slower models 20 21 ### Providers Updated 22 OpenAI, Anthropic, Google (Gemini), DeepSeek, Cohere, Groq, Together AI, Mistral, GLM 23 24 --- 7 25 8 26 ## [4.1.1] - 2026-02-28 -
technodrome-ai-content-assistant/trunk/dashboard/modules/history-tab/history.php
r3461963 r3471585 100 100 'edit_link' => get_edit_post_link($taics_post_id), 101 101 'view_link' => get_permalink($taics_post_id), 102 'excerpt' => wp_trim_words(get_the_content(), 20),102 'excerpt' => wp_trim_words(get_the_content(), 75), 103 103 'category' => $taics_category_name, 104 104 'ai_provider' => $taics_ai_provider, -
technodrome-ai-content-assistant/trunk/features/footer/generate-button.js
r3468923 r3471585 46 46 47 47 // v4.0.7: Validate video slot is populated for video context modes 48 // v4.1.2 FIX: Use correct field ID (#video-url-N, not #taics-video-url-N) 48 49 if (isVideoContextMode) { 49 50 const requiredSlot = (generationMode === 'ai_with_video_channel_context') ? 2 : 1; 50 const slotUrl = $(`# taics-video-url-${requiredSlot}`).val() || '';51 const slotUrl = $(`#video-url-${requiredSlot}`).val() || ''; 51 52 if (slotUrl.trim().length < 5) { 52 53 const slotName = requiredSlot === 2 ? 'Channel URL in Slot 2' : 'Video URL in Slot 1'; -
technodrome-ai-content-assistant/trunk/features/history-tab/history-load-more.js
r3372557 r3471585 207 207 </div> 208 208 <div class="taics-item-excerpt"> 209 <p> ${item.excerpt}</p>209 <p><em style="font-size:11px;color:#888;">Preview (first 75 words):</em><br>${item.excerpt}</p> 210 210 </div> 211 211 </div> -
technodrome-ai-content-assistant/trunk/includes/class-ai-providers.php
r3462088 r3471585 17 17 'model' => $args['model'] ?: 'gpt-4o', 18 18 'messages' => array(array('role' => 'user', 'content' => $prompt)), 19 'max_tokens' => intval($args['word_count']) * 3, // Increased from *2 to *3 for complete content19 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 20 20 'temperature' => 0.7 21 21 ); … … 27 27 ), 28 28 'body' => json_encode($request_data), 29 'timeout' => 6029 'timeout' => 120 30 30 )); 31 31 … … 161 161 $request_data = array( 162 162 'model' => $args['model'] ?: 'claude-3-5-sonnet-20241022', 163 'max_tokens' => intval($args['word_count']) * 3, // Increased from *2 to *3 for complete content163 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 164 164 'messages' => array(array('role' => 'user', 'content' => $prompt)) 165 165 ); 166 166 167 167 $response = wp_remote_post('https://api.anthropic.com/v1/messages', array( 168 168 'headers' => array( … … 172 172 ), 173 173 'body' => json_encode($request_data), 174 'timeout' => 60174 'timeout' => 120 175 175 )); 176 176 … … 239 239 'generationConfig' => array( 240 240 'temperature' => 0.7, 241 'maxOutputTokens' => intval($args['word_count']) * 3 // Increased from *2 to *3 for complete content241 'maxOutputTokens' => intval($args['word_count']) * 5 // v4.1.2: Increased from *3 to *5 for non-English languages 242 242 ) 243 243 ); … … 245 245 $model = $args['model'] ?: 'gemini-1.5-pro'; 246 246 $url = 'https://generativelanguage.googleapis.com/v1beta/models/' . $model . ':generateContent?key=' . 
$args['api_key']; 247 247 248 248 $response = wp_remote_post($url, array( 249 249 'headers' => array('Content-Type' => 'application/json'), 250 250 'body' => json_encode($request_data), 251 'timeout' => 60251 'timeout' => 120 252 252 )); 253 253 … … 333 333 'model' => $args['model'] ?: 'deepseek-chat', 334 334 'messages' => array(array('role' => 'user', 'content' => $prompt)), 335 'max_tokens' => intval($args['word_count']) * 3, // Increased from *2 to *3 for complete content335 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 336 336 'temperature' => 0.7 337 337 ); 338 338 339 339 $response = wp_remote_post('https://api.deepseek.com/v1/chat/completions', array( 340 340 'headers' => array( … … 343 343 ), 344 344 'body' => json_encode($request_data), 345 'timeout' => 60345 'timeout' => 120 346 346 )); 347 347 … … 416 416 'model' => $args['model'] ?: 'command-r-plus', 417 417 'message' => $prompt, 418 'max_tokens' => intval($args['word_count']) * 3, // Increased from *2 to *3 for complete content418 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 419 419 'temperature' => 0.7 420 420 ); 421 421 422 422 $response = wp_remote_post('https://api.cohere.ai/v1/chat', array( 423 423 'headers' => array( … … 426 426 ), 427 427 'body' => json_encode($request_data), 428 'timeout' => 60428 'timeout' => 120 429 429 )); 430 430 … … 500 500 'model' => $args['model'] ?: 'llama-3.3-70b-versatile', 501 501 'messages' => array(array('role' => 'user', 'content' => $prompt)), 502 'max_tokens' => intval($args['word_count']) * 3,502 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 503 503 'temperature' => 0.7 504 504 ); … … 510 510 ), 511 511 'body' => json_encode($request_data), 512 'timeout' => 60512 'timeout' => 120 513 513 )); 514 514 … … 584 584 'model' => $args['model'] ?: 
'meta-llama/Meta-Llama-3.1-405B-Instruct', 585 585 'messages' => array(array('role' => 'user', 'content' => $prompt)), 586 'max_tokens' => intval($args['word_count']) * 3,586 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 587 587 'temperature' => 0.7 588 588 ); … … 594 594 ), 595 595 'body' => json_encode($request_data), 596 'timeout' => 60596 'timeout' => 120 597 597 )); 598 598 … … 679 679 'model' => $args['model'] ?: 'mistral-large-latest', 680 680 'messages' => array(array('role' => 'user', 'content' => $prompt)), 681 'max_tokens' => intval($args['word_count']) * 3,681 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 682 682 'temperature' => 0.7 683 683 ); … … 689 689 ), 690 690 'body' => json_encode($request_data), 691 'timeout' => 60691 'timeout' => 120 692 692 )); 693 693 … … 766 766 array('role' => 'user', 'content' => $prompt) 767 767 ), 768 'max_tokens' => intval($args['word_count']) * 3,768 'max_tokens' => intval($args['word_count']) * 5, // v4.1.2: Increased from *3 to *5 for non-English languages 769 769 'temperature' => 0.7 770 770 ); … … 776 776 ), 777 777 'body' => json_encode($request_data), 778 'timeout' => 60778 'timeout' => 120 779 779 )); 780 780 -
technodrome-ai-content-assistant/trunk/includes/class-ajax-handler.php
r3468923 r3471585 988 988 'edit_link' => esc_url(get_edit_post_link($post_id)), 989 989 'view_link' => esc_url(get_permalink($post_id)), 990 'excerpt' => esc_html(wp_trim_words(get_the_content(), 25)),990 'excerpt' => esc_html(wp_trim_words(get_the_content(), 75)), 991 991 ]; 992 992 } -
technodrome-ai-content-assistant/trunk/readme.txt
r3471565 r3471585 5 5 Tested up to: 6.9 6 6 Requires PHP: 8.0 7 Stable tag: 4.1. 17 Stable tag: 4.1.2 8 8 License: GPL v2 or later 9 9 License URI: https://www.gnu.org/licenses/gpl-2.0.html … … 40 40 41 41 == Changelog == 42 43 = 4.1.2 (2026-02-28) = 44 * **FIX**: AI content no longer truncates mid-sentence for non-English languages (Serbian, Croatian, etc.) 45 * **IMPROVED**: `max_tokens` multiplier increased from x3 to x5 — non-English text uses more tokens per word 46 * **IMPROVED**: API request timeout increased from 60s to 120s — prevents timeout on longer content generation 47 * **AFFECTS**: All 9 providers: OpenAI, Anthropic, Google, DeepSeek, Cohere, Groq, Together AI, Mistral, GLM 42 48 43 49 = 4.1.1 (2026-02-28) = -
technodrome-ai-content-assistant/trunk/technodrome-ai-content-assistant.php
r3471565 r3471585 4 4 * Plugin URI: https://technodrome.org/ai-content-assistant 5 5 * Description: Advanced AI content generation plugin with multiple AI providers, profile system, layout templates, and content rules for WordPress. 6 * Version: 4.1. 16 * Version: 4.1.2 7 7 * Author: Technodrome Team 8 8 * Author URI: https://technodrome.org … … 30 30 31 31 // Plugin constants 32 define('TAICS_VERSION', '4.1. 1');32 define('TAICS_VERSION', '4.1.2'); 33 33 define('TAICS_PLUGIN_FILE', __FILE__); 34 34 define('TAICS_PLUGIN_DIR', plugin_dir_path(__FILE__));
Note: See TracChangeset for help on using the changeset viewer.