mirror of https://github.com/coalaura/whiskr.git synced 2025-09-08 17:06:42 +00:00

use yml for config

commit abefbf1b92 (parent f5f984a46f)
Author: Laura
Date: 2025-08-16 15:15:06 +02:00

12 changed files with 71 additions and 127 deletions


@@ -1,11 +0,0 @@
-# Your openrouter.ai token (required)
-OPENROUTER_TOKEN = ""
-# Your exa-search token (optional)
-EXA_TOKEN = ""
-# How many messages/tool calls before the model is cut-off (optional, default: 3)
-MAX_ITERATIONS = 3
-# Replace unicode quotes, dashes, etc. in the assistants output (optional, default: false)
-CLEAN_CONTENT = true

.gitignore (vendored, 2 changes)

@@ -1,2 +1,2 @@
-.env
+config.yml
 debug.json


@@ -122,7 +122,7 @@ func (r *Request) Parse() (*openrouter.ChatCompletionRequest, error) {
 		request.Messages = append(request.Messages, openrouter.SystemMessage(prompt))
 	}
-	if model.Tools && r.Search && ExaToken != "" {
+	if model.Tools && r.Search && env.Tokens.Exa != "" {
 		request.Tools = GetSearchTools()
 		request.ToolChoice = "auto"
@@ -216,10 +216,10 @@ func HandleChat(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
-	for iteration := range MaxIterations {
-		debug("iteration %d of %d", iteration+1, MaxIterations)
+	for iteration := range env.Settings.MaxIterations {
+		debug("iteration %d of %d", iteration+1, env.Settings.MaxIterations)
-		if iteration == MaxIterations-1 {
+		if iteration == env.Settings.MaxIterations-1 {
 			debug("no more tool calls")
 			request.Tools = nil
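
For context, a minimal standalone sketch of the cut-off behaviour this loop now reads from env.Settings.MaxIterations: on the last allowed iteration the tool definitions are dropped, so the model has to produce a normal answer instead of another tool call. The Request type and runTurn helper below are simplified stand-ins, not whiskr's actual code.

package main

import "fmt"

// Request stands in for the openrouter chat completion request.
type Request struct {
	Tools []string
}

// runTurn mimics the shape of the HandleChat loop: run up to maxIterations
// rounds, stripping the tools on the final round so the model must reply.
func runTurn(request *Request, maxIterations uint) {
	for iteration := range maxIterations { // range over an integer, Go 1.22+
		fmt.Printf("iteration %d of %d\n", iteration+1, maxIterations)

		if iteration == maxIterations-1 {
			// no more tool calls allowed after this round
			request.Tools = nil
		}

		// ... send the request, stream the reply, append any tool results ...
	}
}

func main() {
	runTurn(&Request{Tools: []string{"web_search"}}, 3)
}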


@@ -12,7 +12,7 @@ var cleaner = strings.NewReplacer(
 )
 func CleanChunk(chunk string) string {
-	if !CleanContent {
+	if !env.Settings.CleanContent {
 		return chunk
 	}
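
A rough sketch of what the cleaner/CleanChunk pair does once settings.cleanContent comes from the new config: typographic Unicode punctuation in streamed chunks is replaced with plain ASCII. The replacement pairs below are illustrative, not the project's exact list, and the flag is passed in explicitly instead of read from the global env.

package main

import (
	"fmt"
	"strings"
)

// an illustrative subset of typographic characters to normalise
var cleaner = strings.NewReplacer(
	"\u201c", `"`, // left double quotation mark
	"\u201d", `"`, // right double quotation mark
	"\u2018", "'", // left single quotation mark
	"\u2019", "'", // right single quotation mark
	"\u2013", "-", // en dash
	"\u2014", "-", // em dash
)

func cleanChunk(chunk string, cleanContent bool) string {
	if !cleanContent {
		return chunk
	}

	return cleaner.Replace(chunk)
}

func main() {
	fmt.Println(cleanChunk("\u201cdon\u2019t\u201d \u2013 cleaned", true))
}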


@@ -6,7 +6,7 @@ import (
 )
 func dump(name string, val any) {
-	if !Debug {
+	if !env.Debug {
 		return
 	}
@@ -15,7 +15,7 @@ func dump(name string, val any) {
 }
 func debug(format string, args ...any) {
-	if !Debug {
+	if !env.Debug {
 		return
 	}

env.go (74 changes)

@@ -2,59 +2,59 @@ package main
 import (
 	"errors"
-	"fmt"
 	"os"
-	"strconv"
-	"github.com/joho/godotenv"
+	"github.com/goccy/go-yaml"
 )
-var (
-	Debug bool
+type EnvTokens struct {
+	OpenRouter string `json:"openrouter"`
+	Exa        string `json:"exa"`
+}
-	CleanContent  bool
-	MaxIterations int
+type EnvSettings struct {
+	CleanContent  bool `json:"cleanContent"`
+	MaxIterations uint `json:"maxIterations"`
+}
-	OpenRouterToken string
-	ExaToken        string
-)
+type Environment struct {
+	Debug    bool        `json:"debug"`
+	Tokens   EnvTokens   `json:"tokens"`
+	Settings EnvSettings `json:"settings"`
+}
+var env Environment
 func init() {
-	log.MustPanic(godotenv.Load())
+	file, err := os.OpenFile("config.yml", os.O_RDONLY, 0)
+	log.MustPanic(err)
-	// enable debug logs & prints
-	Debug = os.Getenv("DEBUG") == "true"
+	defer file.Close()
-	if Debug {
+	err = yaml.NewDecoder(file).Decode(&env)
+	log.MustPanic(err)
+	log.MustPanic(env.Init())
+}
+func (e *Environment) Init() error {
+	// print if debug is enabled
+	if e.Debug {
 		log.Warning("Debug mode enabled")
 	}
-	// de-ai assistant response content
-	CleanContent = os.Getenv("DEBUG") == "true"
+	// check max iterations
+	e.Settings.MaxIterations = max(e.Settings.MaxIterations, 1)
-	// maximum amount of iterations per turn
-	if env := os.Getenv("MAX_ITERATIONS"); env != "" {
-		iterations, err := strconv.Atoi(env)
-		if err != nil {
-			log.Panic(fmt.Errorf("invalid max iterations: %v", err))
-		}
-		if iterations < 1 {
-			log.Panic(errors.New("max iterations has to be 1 or more"))
-		}
-		MaxIterations = iterations
-	} else {
-		MaxIterations = 3
+	// check if openrouter token is set
+	if e.Tokens.OpenRouter == "" {
+		return errors.New("missing tokens.openrouter")
 	}
-	// openrouter token used for all completions & model list
-	if OpenRouterToken = os.Getenv("OPENROUTER_TOKEN"); OpenRouterToken == "" {
-		log.Panic(errors.New("missing openrouter token"))
+	// check if exa token is set
+	if e.Tokens.Exa == "" {
+		log.Warning("missing token.exa, web search unavailable")
 	}
-	// optional exa token used for search tools
-	if ExaToken = os.Getenv("EXA_TOKEN"); ExaToken == "" {
-		log.Warning("missing exa token, web search unavailable")
-	}
+	return nil
 }
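
The new loader boils down to decoding config.yml straight into one struct. A minimal self-contained sketch of the same pattern, assuming a config.yml in the working directory; the json tags are copied from the Environment struct above, which this diff relies on goccy/go-yaml resolving for field mapping.

package main

import (
	"fmt"
	"os"

	"github.com/goccy/go-yaml"
)

// Config mirrors the Environment struct from this commit.
type Config struct {
	Debug  bool `json:"debug"`
	Tokens struct {
		OpenRouter string `json:"openrouter"`
		Exa        string `json:"exa"`
	} `json:"tokens"`
	Settings struct {
		CleanContent  bool `json:"cleanContent"`
		MaxIterations uint `json:"maxIterations"`
	} `json:"settings"`
}

func main() {
	file, err := os.Open("config.yml")
	if err != nil {
		panic(err)
	}
	defer file.Close()

	var cfg Config
	if err := yaml.NewDecoder(file).Decode(&cfg); err != nil {
		panic(err)
	}

	// clamp like Environment.Init does, so 0 still means at least one pass
	if cfg.Settings.MaxIterations < 1 {
		cfg.Settings.MaxIterations = 1
	}

	fmt.Printf("debug=%v search=%v iterations=%d\n",
		cfg.Debug, cfg.Tokens.Exa != "", cfg.Settings.MaxIterations)
}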

exa.go (2 changes)

@@ -71,7 +71,7 @@ func NewExaRequest(ctx context.Context, path string, data any) (*http.Request, e
 	req = req.WithContext(ctx)
 	req.Header.Set("Content-Type", "application/json")
-	req.Header.Set("X-Api-Key", ExaToken)
+	req.Header.Set("X-Api-Key", env.Tokens.Exa)
 	return req, nil
 }

example.config.yml (new file, 14 changes)

@@ -0,0 +1,14 @@
+# Enable verbose logging and extra diagnostics
+debug: false
+tokens:
+  # Your openrouter.ai token (required)
+  openrouter: ""
+  # Your exa-search token (optional, for search tools)
+  exa: ""
+settings:
+  # Replace unicode quotes, dashes, etc. in the assistants output (optional, default: false)
+  cleanContent: true
+  # How many messages/tool calls before the model is cut off (optional, default: 3)
+  maxIterations: 3

go.mod (4 changes)

@@ -5,7 +5,7 @@ go 1.24.5
 require (
 	github.com/coalaura/logger v1.5.1
 	github.com/go-chi/chi/v5 v5.2.2
-	github.com/joho/godotenv v1.5.1
+	github.com/goccy/go-yaml v1.18.0
 	github.com/revrost/go-openrouter v0.2.1
 )
@@ -17,5 +17,5 @@ require (
 	github.com/rs/zerolog v1.34.0 // indirect
 	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
 	golang.org/x/sys v0.35.0 // indirect
-	golang.org/x/term v0.32.0 // indirect
+	golang.org/x/term v0.34.0 // indirect
 )

go.sum (8 changes)

@@ -7,11 +7,11 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
 github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
 github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
-github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
-github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
@@ -38,7 +38,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
 golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
-golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -35,7 +35,7 @@ func main() {
 	r.Get("/-/data", func(w http.ResponseWriter, r *http.Request) {
 		RespondJson(w, http.StatusOK, map[string]any{
 			"version": Version,
-			"search":  ExaToken != "",
+			"search":  env.Tokens.Exa != "",
 			"models":  models,
 		})
 	})


@@ -2,50 +2,16 @@ package main
 import (
 	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/http"
 	"github.com/revrost/go-openrouter"
 )
-type Generation struct {
-	ID string `json:"id"`
-	TotalCost float64 `json:"total_cost"`
-	CreatedAt string `json:"created_at"`
-	Model string `json:"model"`
-	Origin string `json:"origin"`
-	Usage float64 `json:"usage"`
-	IsBYOK bool `json:"is_byok"`
-	UpstreamID *string `json:"upstream_id"`
-	CacheDiscount *float64 `json:"cache_discount"`
-	UpstreamInferenceCost *float64 `json:"upstream_inference_cost"`
-	AppID *int `json:"app_id"`
-	Streamed *bool `json:"streamed"`
-	Cancelled *bool `json:"cancelled"`
-	ProviderName *string `json:"provider_name"`
-	Latency *int `json:"latency"`
-	ModerationLatency *int `json:"moderation_latency"`
-	GenerationTime *int `json:"generation_time"`
-	FinishReason *string `json:"finish_reason"`
-	NativeFinishReason *string `json:"native_finish_reason"`
-	TokensPrompt *int `json:"tokens_prompt"`
-	TokensCompletion *int `json:"tokens_completion"`
-	NativeTokensPrompt *int `json:"native_tokens_prompt"`
-	NativeTokensCompletion *int `json:"native_tokens_completion"`
-	NativeTokensReasoning *int `json:"native_tokens_reasoning"`
-	NumMediaPrompt *int `json:"num_media_prompt"`
-	NumMediaCompletion *int `json:"num_media_completion"`
-	NumSearchResults *int `json:"num_search_results"`
-}
 func init() {
 	openrouter.DisableLogs()
 }
 func OpenRouterClient() *openrouter.Client {
-	return openrouter.NewClient(OpenRouterToken)
+	return openrouter.NewClient(env.Tokens.OpenRouter)
 }
func OpenRouterStartStream(ctx context.Context, request openrouter.ChatCompletionRequest) (*openrouter.ChatCompletionStream, error) {
@@ -65,33 +31,8 @@ func OpenRouterRun(ctx context.Context, request openrouter.ChatCompletionRequest
 	return client.CreateChatCompletion(ctx, request)
 }
-func OpenRouterGetGeneration(ctx context.Context, id string) (*Generation, error) {
-	req, err := http.NewRequest("GET", fmt.Sprintf("https://openrouter.ai/api/v1/generation?id=%s", id), nil)
-	if err != nil {
-		return nil, err
-	}
+func OpenRouterGetGeneration(ctx context.Context, id string) (openrouter.Generation, error) {
+	client := OpenRouterClient()
-	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", OpenRouterToken))
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		return nil, errors.New(resp.Status)
-	}
-	var response struct {
-		Data Generation `json:"data"`
-	}
-	err = json.NewDecoder(resp.Body).Decode(&response)
-	if err != nil {
-		return nil, err
-	}
-	return &response.Data, nil
+	return client.GetGeneration(ctx, id)
 }
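
With the hand-rolled HTTP call gone, fetching post-completion stats is a single client call. A hedged usage sketch of the new path; the token and generation id are placeholders, and since the library's Generation fields vary by version the record is simply dumped as-is.

package main

import (
	"context"
	"fmt"

	"github.com/revrost/go-openrouter"
)

func main() {
	// placeholder token; whiskr passes env.Tokens.OpenRouter here
	client := openrouter.NewClient("sk-or-...")

	// id of a finished completion, as reported by the chat response/stream
	gen, err := client.GetGeneration(context.Background(), "gen-...")
	if err != nil {
		panic(err)
	}

	// dump the whole record (cost, token counts, latency, ...)
	fmt.Printf("%+v\n", gen)
}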