minimal working version of backend and cli

2025-10-14 01:58:24 -04:00
parent 839f0c9107
commit 2fdb1ece43
9 changed files with 615 additions and 48 deletions

@@ -1,9 +1,34 @@
package glue

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "net/http"
    "time"

    "mind/internal/config"
    "mind/internal/db"
    "mind/internal/models"
)

type Glue struct {
    repo *db.Repo
    cfg  config.Config
}

// NewGlue now receives cfg
func NewGlue(r *db.Repo, cfg config.Config) *Glue { return &Glue{repo: r, cfg: cfg} }

func (g *Glue) CreateConversation(ctx context.Context, ownerID int64, title string) (int64, error) {
    return g.repo.CreateConversation(ctx, ownerID, title)
}

func (g *Glue) ListConversations(ctx context.Context, ownerID int64) ([]models.Conversation, error) {
    return g.repo.ListConversations(ctx, ownerID)
}
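
For illustration, a minimal wiring sketch. Only NewGlue's signature and the LlamaURL config field are visible in this diff; how *db.Repo is constructed, and whether config.Config has other required fields, are assumptions here:

package example

import (
    "context"
    "fmt"

    "mind/internal/config"
    "mind/internal/db"
    "mind/internal/glue"
)

// newGlueFromParts shows the wiring; building *db.Repo itself is not part of this file.
func newGlueFromParts(repo *db.Repo) *glue.Glue {
    // Only the LlamaURL field is confirmed by this diff.
    cfg := config.Config{LlamaURL: "http://127.0.0.1:8080"}
    return glue.NewGlue(repo, cfg)
}

func demo(ctx context.Context, repo *db.Repo) error {
    g := newGlueFromParts(repo)
    id, err := g.CreateConversation(ctx, 1, "first chat")
    if err != nil {
        return err
    }
    fmt.Println("created conversation", id)
    return nil
}
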
type CompletionReq struct {
    ConversationID int64  `json:"conversation_id"`
    BranchName     string `json:"branch"`
@@ -11,29 +36,78 @@ type CompletionReq struct {
}

type CompletionResp struct {
    PromptNodeID int64  `json:"prompt_node_id"`
    AnswerNodeID int64  `json:"answer_node_id"`
    Answer       string `json:"answer"`
}

// llama request/response (minimal /completion wire shape)
type llamaReq struct {
    Prompt string `json:"prompt"`
}

type llamaResp struct {
    Content string `json:"content"`
}

func (g *Glue) callLlama(ctx context.Context, prompt string) (string, error) {
    if g.cfg.LlamaURL == "" {
        return "", errors.New("LLAMA_URL not set")
    }
    body, _ := json.Marshal(llamaReq{Prompt: prompt}) // marshaling this struct cannot fail
    url := g.cfg.LlamaURL + "/completion"
    httpc := &http.Client{Timeout: 45 * time.Second}
    req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
    if err != nil {
        return "", err
    }
    req.Header.Set("Content-Type", "application/json")
    resp, err := httpc.Do(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    var a llamaResp
    if err := json.NewDecoder(resp.Body).Decode(&a); err == nil && a.Content != "" {
        return a.Content, nil
    }
    return "", errors.New("unexpected llama response")
}
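
callLlama assumes a llama.cpp-style /completion endpoint: {"prompt": ...} in, {"content": ...} out. A sketch of a stand-in server for local testing, using only the standard library's httptest; the config.Config literal assumes LlamaURL is the only field callLlama needs:

package glue

import (
    "context"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "testing"

    "mind/internal/config"
)

func TestCallLlamaContract(t *testing.T) {
    // Stand-in for the llama server: decodes {"prompt"} and returns {"content"}.
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        var in llamaReq
        if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        _ = json.NewEncoder(w).Encode(llamaResp{Content: "echo: " + in.Prompt})
    }))
    defer srv.Close()

    // repo is unused by callLlama, so nil is fine here.
    g := NewGlue(nil, config.Config{LlamaURL: srv.URL})
    got, err := g.callLlama(context.Background(), "hi")
    if err != nil {
        t.Fatal(err)
    }
    if got != "echo: hi" {
        t.Fatalf("unexpected answer: %q", got)
    }
}
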

// AppendCompletion appends one prompt/answer turn to a branch. It tries a
// real llama call and falls back to a simple echo stub if the call fails.
func (g *Glue) AppendCompletion(ctx context.Context, req CompletionReq) (CompletionResp, error) {
    b, err := g.repo.GetBranch(ctx, req.ConversationID, req.BranchName)
    if err != nil {
        return CompletionResp{}, err
    }

    // 1) create the user prompt node and link it after the branch head
    promptID, err := g.repo.CreateNode(ctx, req.ConversationID, "user", req.Prompt)
    if err != nil {
        return CompletionResp{}, err
    }
    if err := g.repo.Link(ctx, b.HeadNodeID, promptID); err != nil {
        return CompletionResp{}, err
    }

    // 2) real llama call (fallback to stub if it fails)
    answerText, err := g.callLlama(ctx, req.Prompt)
    if err != nil {
        answerText = "(stub) You said: " + req.Prompt
    }

    // create the assistant answer node and link it after the prompt
    answerID, err := g.repo.CreateNode(ctx, req.ConversationID, "assistant", answerText)
    if err != nil {
        return CompletionResp{}, err
    }
    if err := g.repo.Link(ctx, promptID, answerID); err != nil {
        return CompletionResp{}, err
    }

    // 3) move the branch head to the new answer
    if err := g.repo.MoveBranchHead(ctx, b.ID, answerID); err != nil {
        return CompletionResp{}, err
    }
    return CompletionResp{PromptNodeID: promptID, AnswerNodeID: answerID, Answer: answerText}, nil
}
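
Taken together, one AppendCompletion call grows the branch by two linked nodes (prompt, then answer) and advances the branch head. A hedged usage sketch, assuming the conversation and a branch named "main" already exist:

package example

import (
    "context"
    "fmt"

    "mind/internal/glue"
)

// appendTurn runs one prompt/answer round trip on an existing branch.
func appendTurn(ctx context.Context, g *glue.Glue, convID int64) error {
    resp, err := g.AppendCompletion(ctx, glue.CompletionReq{
        ConversationID: convID,
        BranchName:     "main", // GetBranch errors if this branch does not exist yet
        Prompt:         "hello",
    })
    if err != nil {
        return err
    }
    // The branch head now points at resp.AnswerNodeID, one hop past resp.PromptNodeID.
    fmt.Println(resp.Answer)
    return nil
}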