114 lines
		
	
	
		
			2.9 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
			
		
		
	
	
			114 lines
		
	
	
		
			2.9 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
| package glue
 | |
| 
 | |
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"time"

	"mind/internal/config"
	"mind/internal/db"
	"mind/internal/models"
)
 | |
| 
 | |
// Glue ties the persistence layer to the completion workflow: it owns the
// repository used for conversations/nodes/branches and the service
// configuration (which supplies the llama server URL).
type Glue struct {
	repo *db.Repo      // conversation, node, and branch storage
	cfg  config.Config // service settings; LlamaURL is read by callLlama
}
 | |
| 
 | |
| // NewGlue now receives cfg
 | |
| func NewGlue(r *db.Repo, cfg config.Config) *Glue { return &Glue{repo: r, cfg: cfg} }
 | |
| 
 | |
// CreateConversation creates a new conversation owned by ownerID with the
// given title and returns its ID. It delegates directly to the repository.
func (g *Glue) CreateConversation(ctx context.Context, ownerID int64, title string) (int64, error) {
	return g.repo.CreateConversation(ctx, ownerID, title)
}
 | |
| 
 | |
// ListConversations returns all conversations owned by ownerID.
// It delegates directly to the repository.
func (g *Glue) ListConversations(ctx context.Context, ownerID int64) ([]models.Conversation, error) {
	return g.repo.ListConversations(ctx, ownerID)
}
 | |
| 
 | |
// CompletionReq is the request payload for AppendCompletion: which
// conversation and branch to append to, and the user's prompt text.
type CompletionReq struct {
	ConversationID int64  `json:"conversation_id"` // target conversation
	BranchName     string `json:"branch"`          // branch within the conversation
	Prompt         string `json:"prompt"`          // user prompt to record and answer
}
 | |
| 
 | |
// CompletionResp reports the two nodes created by AppendCompletion
// (the user prompt node and the assistant answer node) plus the answer text.
type CompletionResp struct {
	PromptNodeID int64  `json:"prompt_node_id"` // node storing the user prompt
	AnswerNodeID int64  `json:"answer_node_id"` // node storing the assistant answer
	Answer       string `json:"answer"`         // assistant answer text
}
 | |
| 
 | |
// llama request/response wire types. NOTE(review): the original comment
// claimed two response shapes are supported, but only the
// {"content": "..."} shape is actually decoded below.
type llamaReq struct {
	Prompt string `json:"prompt"`
}
type llamaResp struct {
	Content string `json:"content"`
}
 | |
| 
 | |
| func (g *Glue) callLlama(ctx context.Context, prompt string) (string, error) {
 | |
| 	if g.cfg.LlamaURL == "" {
 | |
| 		return "", errors.New("LLAMA_URL not set")
 | |
| 	}
 | |
| 	body, _ := json.Marshal(llamaReq{Prompt: prompt})
 | |
| 	url := g.cfg.LlamaURL + "/completion"
 | |
| 
 | |
| 	httpc := &http.Client{Timeout: 45 * time.Second}
 | |
| 
 | |
| 	req, _ := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
 | |
| 	req.Header.Set("Content-Type", "application/json")
 | |
| 	resp, err := httpc.Do(req)
 | |
| 	if err != nil {		
 | |
| 		return "", err
 | |
| 	}
 | |
| 	defer resp.Body.Close()
 | |
| 	
 | |
| 	dec := json.NewDecoder(resp.Body)
 | |
| 	var a llamaResp
 | |
| 	if err := dec.Decode(&a); err == nil && a.Content != "" {
 | |
| 		return a.Content, nil
 | |
| 	}
 | |
| 
 | |
| 	return "", errors.New("unexpected llama response")
 | |
| }
 | |
| 
 | |
// AppendCompletion records a user prompt and an assistant answer on the given
// conversation branch, then advances the branch head to the answer node.
//
// Steps: (1) append a "user" node holding req.Prompt and link it after the
// branch's current head; (2) obtain the answer from the llama server, falling
// back to a stub string on any llama error; (3) append an "assistant" node
// with the answer, link it after the prompt node, and move the branch head to
// it. Returns the two new node IDs and the answer text.
//
// NOTE(review): the repo calls are not visibly wrapped in a transaction here;
// a failure partway through (e.g. after the prompt node is created) may leave
// orphan nodes or an unlinked chain — confirm repo-level guarantees.
func (g *Glue) AppendCompletion(ctx context.Context, req CompletionReq) (CompletionResp, error) {
	// Resolve the branch to learn its current head node.
	b, err := g.repo.GetBranch(ctx, req.ConversationID, req.BranchName)
	if err != nil {
		return CompletionResp{}, err
	}

	// 1) user prompt node
	promptID, err := g.repo.CreateNode(ctx, req.ConversationID, "user", req.Prompt)
	if err != nil {
		return CompletionResp{}, err
	}
	if err := g.repo.Link(ctx, b.HeadNodeID, promptID); err != nil {
		return CompletionResp{}, err
	}

	// 2) real llama call (fallback to stub if it fails)
	// Deliberate best-effort: a llama failure does not abort the append; the
	// llama error itself is discarded and replaced by the stub text.
	answerText, err := g.callLlama(ctx, req.Prompt)
	if err != nil {
		answerText = "(stub) You said: " + req.Prompt
	}

	answerID, err := g.repo.CreateNode(ctx, req.ConversationID, "assistant", answerText)
	if err != nil {
		return CompletionResp{}, err
	}
	if err := g.repo.Link(ctx, promptID, answerID); err != nil {
		return CompletionResp{}, err
	}

	// 3) move branch head
	if err := g.repo.MoveBranchHead(ctx, b.ID, answerID); err != nil {
		return CompletionResp{}, err
	}

	return CompletionResp{PromptNodeID: promptID, AnswerNodeID: answerID, Answer: answerText}, nil
}
 |