Skip to content

Commit 18fb470

Browse files
memory
1 parent 2bbc811 commit 18fb470

4 files changed

Lines changed: 911 additions & 33 deletions

File tree

apps/server/src/services/auto-mode-service.ts

Lines changed: 147 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@ import {
1616
isAbortError,
1717
classifyError,
1818
loadContextFiles,
19+
appendLearning,
20+
recordMemoryUsage,
1921
} from '@automaker/utils';
2022
import { resolveModelString, DEFAULT_MODELS } from '@automaker/model-resolver';
2123
import { resolveDependencies, areDependenciesSatisfied } from '@automaker/dependency-resolver';
@@ -311,6 +313,8 @@ export class AutoModeService {
311313
projectPath,
312314
});
313315

316+
// Note: Memory folder initialization is now handled by loadContextFiles
317+
314318
// Run the loop in the background
315319
this.runAutoLoop().catch((error) => {
316320
console.error('[AutoMode] Loop error:', error);
@@ -504,15 +508,17 @@ export class AutoModeService {
504508

505509
// Build the prompt - use continuation prompt if provided (for recovery after plan approval)
506510
let prompt: string;
507-
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) - passed as system prompt
511+
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
512+
// Context loader now automatically includes memory from .automaker/memory/
508513
const contextResult = await loadContextFiles({
509514
projectPath,
510515
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
511516
});
512517

513518
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
514519
// (SDK handles CLAUDE.md via settingSources), but keep other context files like CODE_QUALITY.md
515-
const contextFilesPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
520+
// Note: contextResult.formattedPrompt now includes both context AND memory
521+
const combinedSystemPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
516522

517523
if (options?.continuationPrompt) {
518524
// Continuation prompt is used when recovering from a plan approval
@@ -558,7 +564,7 @@ export class AutoModeService {
558564
projectPath,
559565
planningMode: feature.planningMode,
560566
requirePlanApproval: feature.requirePlanApproval,
561-
systemPrompt: contextFilesPrompt || undefined,
567+
systemPrompt: combinedSystemPrompt || undefined,
562568
autoLoadClaudeMd,
563569
}
564570
);
@@ -589,6 +595,36 @@ export class AutoModeService {
589595
// Record success to reset consecutive failure tracking
590596
this.recordSuccess();
591597

598+
// Record learnings and memory usage after successful feature completion
599+
try {
600+
const featureDir = getFeatureDir(projectPath, featureId);
601+
const outputPath = path.join(featureDir, 'agent-output.md');
602+
let agentOutput = '';
603+
try {
604+
const outputContent = await secureFs.readFile(outputPath, 'utf-8');
605+
agentOutput =
606+
typeof outputContent === 'string' ? outputContent : outputContent.toString();
607+
} catch {
608+
// Agent output might not exist yet
609+
}
610+
611+
// Record memory usage if we loaded any memory files
612+
if (contextResult.memoryFiles.length > 0 && agentOutput) {
613+
await recordMemoryUsage(
614+
projectPath,
615+
contextResult.memoryFiles,
616+
agentOutput,
617+
true, // success
618+
secureFs as Parameters<typeof recordMemoryUsage>[4]
619+
);
620+
}
621+
622+
// Extract and record learnings from the agent output
623+
await this.recordLearningsFromFeature(projectPath, feature, agentOutput);
624+
} catch (learningError) {
625+
console.warn('[AutoMode] Failed to record learnings:', learningError);
626+
}
627+
592628
this.emitAutoModeEvent('auto_mode_feature_complete', {
593629
featureId,
594630
passes: true,
@@ -2745,4 +2781,112 @@ Begin implementing task ${task.id} now.`;
27452781
}
27462782
});
27472783
}
2784+
2785+
/**
2786+
* Extract and record learnings from a completed feature
2787+
* Uses a quick Claude call to identify important decisions and patterns
2788+
*/
2789+
private async recordLearningsFromFeature(
2790+
projectPath: string,
2791+
feature: Feature,
2792+
agentOutput: string
2793+
): Promise<void> {
2794+
if (!agentOutput || agentOutput.length < 100) {
2795+
// Not enough output to extract learnings from
2796+
return;
2797+
}
2798+
2799+
// Limit output to avoid token limits
2800+
const truncatedOutput = agentOutput.length > 10000 ? agentOutput.slice(-10000) : agentOutput;
2801+
2802+
const userPrompt = `Based on implementing "${feature.title}", identify any important learnings.
2803+
2804+
Look for:
2805+
1. DECISIONS made that future developers should understand (architecture, library choices, patterns)
2806+
2. GOTCHAS or edge cases encountered that should be avoided
2807+
3. PATTERNS that worked well and should be reused
2808+
2809+
For each finding, respond with JSON:
2810+
{
2811+
"learnings": [
2812+
{
2813+
"category": "architecture|api|ui|terminals|auth|testing|gotchas",
2814+
"type": "decision|gotcha|pattern|learning",
2815+
"content": "Brief description",
2816+
"why": "For decisions: why this choice was made",
2817+
"rejected": "For decisions: what alternative was rejected",
2818+
"breaking": "For decisions: what would break if changed"
2819+
}
2820+
]
2821+
}
2822+
2823+
Only include genuinely useful, non-obvious learnings. Return {"learnings": []} if nothing notable.
2824+
2825+
Feature context:
2826+
${truncatedOutput}`;
2827+
2828+
try {
2829+
// Import query dynamically to avoid circular dependencies
2830+
const { query } = await import('@anthropic-ai/claude-agent-sdk');
2831+
const { CLAUDE_MODEL_MAP } = await import('@automaker/model-resolver');
2832+
2833+
// Use a quick model for extraction
2834+
const stream = query({
2835+
prompt: userPrompt,
2836+
options: {
2837+
model: CLAUDE_MODEL_MAP.haiku,
2838+
maxTurns: 1,
2839+
allowedTools: [],
2840+
permissionMode: 'acceptEdits',
2841+
},
2842+
});
2843+
2844+
// Extract text from stream
2845+
let responseText = '';
2846+
for await (const msg of stream) {
2847+
if (msg.type === 'assistant' && msg.message?.content) {
2848+
for (const block of msg.message.content) {
2849+
if (block.type === 'text' && block.text) {
2850+
responseText += block.text;
2851+
}
2852+
}
2853+
} else if (msg.type === 'result' && msg.subtype === 'success') {
2854+
responseText = msg.result || responseText;
2855+
}
2856+
}
2857+
2858+
// Parse the response
2859+
const jsonMatch = responseText.match(/\{[\s\S]*"learnings"[\s\S]*\}/);
2860+
if (!jsonMatch) return;
2861+
2862+
const parsed = JSON.parse(jsonMatch[0]);
2863+
if (!parsed.learnings || !Array.isArray(parsed.learnings)) return;
2864+
2865+
// Record each learning
2866+
for (const learning of parsed.learnings) {
2867+
if (!learning.category || !learning.content) continue;
2868+
2869+
await appendLearning(
2870+
projectPath,
2871+
{
2872+
category: learning.category,
2873+
type: learning.type || 'learning',
2874+
content: learning.content,
2875+
why: learning.why,
2876+
rejected: learning.rejected,
2877+
breaking: learning.breaking,
2878+
},
2879+
secureFs as Parameters<typeof appendLearning>[2]
2880+
);
2881+
}
2882+
2883+
if (parsed.learnings.length > 0) {
2884+
console.log(
2885+
`[AutoMode] Recorded ${parsed.learnings.length} learning(s) from feature ${feature.id}`
2886+
);
2887+
}
2888+
} catch (error) {
2889+
console.warn('[AutoMode] Failed to extract learnings:', error);
2890+
}
2891+
}
27482892
}

0 commit comments

Comments (0)