-
Notifications
You must be signed in to change notification settings - Fork 1.3k
Expand file tree
/
Copy pathchats.go
More file actions
2981 lines (2704 loc) · 124 KB
/
chats.go
File metadata and controls
2981 lines (2704 loc) · 124 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
package codersdk
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/invopop/jsonschema"
"github.com/shopspring/decimal"
"golang.org/x/xerrors"
"github.com/coder/websocket"
"github.com/coder/websocket/wsjson"
)
// ChatCompactionThresholdKeyPrefix scopes per-model chat compaction
// threshold settings. The full user-config key is this prefix plus
// the model configuration UUID (see CompactionThresholdKey).
const ChatCompactionThresholdKeyPrefix = "chat_compaction_threshold_pct:"

// MaxChatFileIDs is the maximum number of file IDs that can be
// associated with a single chat. This limit prevents unbounded
// growth in the chat_file_links table. It is easier to raise
// this limit than to lower it.
const MaxChatFileIDs = 20
// CompactionThresholdKey returns the user-config key for a specific
// model configuration's compaction threshold. The key is the shared
// prefix followed by the model configuration's UUID string.
func CompactionThresholdKey(modelConfigID uuid.UUID) string {
	return fmt.Sprintf("%s%s", ChatCompactionThresholdKeyPrefix, modelConfigID.String())
}
// ChatStatus represents the status of a chat.
type ChatStatus string

// Chat lifecycle states. NOTE(review): the transitions between these
// states are implemented elsewhere (not visible in this file); the
// per-state meanings are inferred from the names — confirm against
// the chat processing loop before relying on them.
const (
	ChatStatusWaiting ChatStatus = "waiting"
	ChatStatusPending ChatStatus = "pending"
	ChatStatusRunning ChatStatus = "running"
	ChatStatusPaused ChatStatus = "paused"
	ChatStatusCompleted ChatStatus = "completed"
	ChatStatusError ChatStatus = "error"
	// ChatStatusRequiresAction presumably indicates the chat is
	// blocked on a client-supplied result (see
	// SubmitToolResultsRequest) — TODO confirm.
	ChatStatusRequiresAction ChatStatus = "requires_action"
)

// ChatClientType indicates whether a chat was created from the
// web UI or programmatically via the API.
type ChatClientType string

const (
	ChatClientTypeUI ChatClientType = "ui"
	ChatClientTypeAPI ChatClientType = "api"
)
// Chat represents a chat session with an AI agent.
type Chat struct {
	ID uuid.UUID `json:"id" format:"uuid"`
	OrganizationID uuid.UUID `json:"organization_id" format:"uuid"`
	OwnerID uuid.UUID `json:"owner_id" format:"uuid"`
	// WorkspaceID, BuildID, and AgentID are nil until the chat is
	// associated with a workspace — NOTE(review): inferred from the
	// pointer types and the "first workspace attach" comment below;
	// confirm against the attach logic.
	WorkspaceID *uuid.UUID `json:"workspace_id,omitempty" format:"uuid"`
	BuildID *uuid.UUID `json:"build_id,omitempty" format:"uuid"`
	AgentID *uuid.UUID `json:"agent_id,omitempty" format:"uuid"`
	// ParentChatID and RootChatID link a child (subagent) chat to
	// its parent and root; see Children below.
	ParentChatID *uuid.UUID `json:"parent_chat_id,omitempty" format:"uuid"`
	RootChatID *uuid.UUID `json:"root_chat_id,omitempty" format:"uuid"`
	LastModelConfigID uuid.UUID `json:"last_model_config_id" format:"uuid"`
	Title string `json:"title"`
	Status ChatStatus `json:"status"`
	PlanMode ChatPlanMode `json:"plan_mode,omitempty"`
	// LastError is deliberately not omitempty, so the field is
	// always present (possibly null) in the JSON encoding.
	LastError *string `json:"last_error"`
	DiffStatus *ChatDiffStatus `json:"diff_status,omitempty"`
	CreatedAt time.Time `json:"created_at" format:"date-time"`
	UpdatedAt time.Time `json:"updated_at" format:"date-time"`
	Archived bool `json:"archived"`
	// PinOrder is the chat's position among pinned chats; 0 means
	// unpinned. See UpdateChatRequest.PinOrder for the update
	// semantics.
	PinOrder int32 `json:"pin_order"`
	MCPServerIDs []uuid.UUID `json:"mcp_server_ids" format:"uuid"`
	Labels map[string]string `json:"labels"`
	Files []ChatFileMetadata `json:"files,omitempty"`
	// HasUnread is true when assistant messages exist beyond
	// the owner's read cursor, which updates on stream
	// connect and disconnect.
	HasUnread bool `json:"has_unread"`
	// LastInjectedContext holds the most recently persisted
	// injected context parts (AGENTS.md files and skills). It
	// is updated only when context changes, on first workspace
	// attach or agent change.
	LastInjectedContext []ChatMessagePart `json:"last_injected_context,omitempty"`
	Warnings []string `json:"warnings,omitempty"`
	ClientType ChatClientType `json:"client_type"`
	// Children holds child (subagent) chats nested under this root
	// chat. Always initialized to an empty slice so the JSON field
	// is present as []. Child chats cannot create their own
	// subagents, so nesting depth is capped at 1 and this slice is
	// always empty for child chats.
	Children []Chat `json:"children"`
}
// ChatFileMetadata contains lightweight metadata about a file
// associated with a chat, excluding the file content itself.
// Returned embedded in Chat.Files.
type ChatFileMetadata struct {
	ID uuid.UUID `json:"id" format:"uuid"`
	OwnerID uuid.UUID `json:"owner_id" format:"uuid"`
	OrganizationID uuid.UUID `json:"organization_id" format:"uuid"`
	Name string `json:"name"`
	// MimeType is the MIME type of the file content.
	MimeType string `json:"mime_type"`
	CreatedAt time.Time `json:"created_at" format:"date-time"`
}
// ChatMessage represents a single message in a chat.
type ChatMessage struct {
	// ID is an int64 sequence identifier rather than a UUID —
	// NOTE(review): presumably messages order by ID within a chat;
	// confirm against the message queries.
	ID int64 `json:"id"`
	ChatID uuid.UUID `json:"chat_id" format:"uuid"`
	// CreatedBy is the authoring user, when one exists — presumably
	// nil for assistant/system-generated messages; verify.
	CreatedBy *uuid.UUID `json:"created_by,omitempty" format:"uuid"`
	ModelConfigID *uuid.UUID `json:"model_config_id,omitempty" format:"uuid"`
	CreatedAt time.Time `json:"created_at" format:"date-time"`
	Role ChatMessageRole `json:"role"`
	Content []ChatMessagePart `json:"content,omitempty"`
	// Usage, when present, carries token accounting for this message.
	Usage *ChatMessageUsage `json:"usage,omitempty"`
}

// ChatMessageUsage contains token usage information for a chat message.
// Every field is a pointer with omitempty, so values the provider did
// not report are omitted from the JSON entirely.
type ChatMessageUsage struct {
	InputTokens *int64 `json:"input_tokens,omitempty"`
	OutputTokens *int64 `json:"output_tokens,omitempty"`
	TotalTokens *int64 `json:"total_tokens,omitempty"`
	ReasoningTokens *int64 `json:"reasoning_tokens,omitempty"`
	CacheCreationTokens *int64 `json:"cache_creation_tokens,omitempty"`
	CacheReadTokens *int64 `json:"cache_read_tokens,omitempty"`
	// ContextLimit — NOTE(review): appears to be the context window
	// size in tokens; confirm whether it is the model limit or an
	// effective per-chat limit.
	ContextLimit *int64 `json:"context_limit,omitempty"`
}
// ChatMessageRole represents the role of a chat message sender.
type ChatMessageRole string

// ChatMessageRole enums: the conventional system/user/assistant/tool
// chat roles.
const (
	ChatMessageRoleSystem ChatMessageRole = "system"
	ChatMessageRoleUser ChatMessageRole = "user"
	ChatMessageRoleAssistant ChatMessageRole = "assistant"
	ChatMessageRoleTool ChatMessageRole = "tool"
)
// ChatMessagePartType represents a structured message part type. It
// discriminates the variants of ChatMessagePart, whose JSON layout is
// also a persistence format — so these literal values must not change.
type ChatMessagePartType string

const (
	ChatMessagePartTypeText          ChatMessagePartType = "text"
	ChatMessagePartTypeReasoning     ChatMessagePartType = "reasoning"
	ChatMessagePartTypeToolCall      ChatMessagePartType = "tool-call"
	ChatMessagePartTypeToolResult    ChatMessagePartType = "tool-result"
	ChatMessagePartTypeSource        ChatMessagePartType = "source"
	ChatMessagePartTypeFile          ChatMessagePartType = "file"
	ChatMessagePartTypeFileReference ChatMessagePartType = "file-reference"
	ChatMessagePartTypeContextFile   ChatMessagePartType = "context-file"
	ChatMessagePartTypeSkill         ChatMessagePartType = "skill"
)

// AllChatMessagePartTypes returns all known ChatMessagePartType values.
// A fresh slice is returned on every call so callers may mutate it.
func AllChatMessagePartTypes() []ChatMessagePartType {
	all := make([]ChatMessagePartType, 0, 9)
	all = append(all,
		ChatMessagePartTypeText,
		ChatMessagePartTypeReasoning,
		ChatMessagePartTypeToolCall,
		ChatMessagePartTypeToolResult,
		ChatMessagePartTypeSource,
		ChatMessagePartTypeFile,
		ChatMessagePartTypeFileReference,
		ChatMessagePartTypeContextFile,
		ChatMessagePartTypeSkill,
	)
	return all
}
// ChatMessagePart is a structured chunk of a chat message.
//
// WARNING: This type is both an API wire type and a database
// persistence format. Its JSON layout is stored in the
// chat_messages.content column. Field additions, renames, type
// changes, and omitempty behavior all affect backward-compatible
// deserialization of stored rows. Treat changes to this struct
// with the same care as a database migration.
//
// The variants struct tag declares which discriminated-union
// variants include each field in the generated TypeScript. Bare
// name = required, ? suffix = optional. Fields without a variants
// tag are excluded from the generated union. See
// scripts/apitypings/main.go for the codegen that reads these.
//
// omitempty rules (enforced by TestChatMessagePartVariantTags):
// - If a field is required (no ? suffix) in ANY variant, it
// must NOT use omitempty. Go would silently drop zero values
// that TypeScript expects to always be present.
// - If a field is optional (? suffix) in ALL of its variants,
// it MUST use omitempty. Sending zero values for fields that
// the frontend does not expect adds noise to the wire format
// and wastes space in persisted chat_messages rows.
type ChatMessagePart struct {
	// Type discriminates which of the field groups below is populated.
	Type ChatMessagePartType `json:"type"`
	Text string `json:"text" variants:"text,reasoning"`
	// Signature — NOTE(review): purpose is not documented here and it
	// carries no variants tag (so it is excluded from the generated
	// TypeScript union); confirm its semantics with the chat loop
	// before changing it.
	Signature string `json:"signature,omitempty"`
	ToolCallID string `json:"tool_call_id,omitempty" variants:"tool-call?,tool-result?"`
	ToolName string `json:"tool_name,omitempty" variants:"tool-call?,tool-result?"`
	MCPServerConfigID uuid.NullUUID `json:"mcp_server_config_id,omitempty" format:"uuid" variants:"tool-call?,tool-result?"`
	Args json.RawMessage `json:"args,omitempty" variants:"tool-call?"`
	ArgsDelta string `json:"args_delta,omitempty" variants:"tool-call?"`
	Result json.RawMessage `json:"result,omitempty" variants:"tool-result?"`
	// ResultDelta is a streaming-only incremental chunk consumed by
	// the frontend via SSE (see the note on StripInternal). It has no
	// variants tag, so it is excluded from the generated union.
	ResultDelta string `json:"result_delta,omitempty"`
	IsError bool `json:"is_error,omitempty" variants:"tool-result?"`
	IsMedia bool `json:"is_media,omitempty" variants:"tool-result?"`
	SourceID string `json:"source_id,omitempty" variants:"source?"`
	URL string `json:"url" variants:"source"`
	Title string `json:"title,omitempty" variants:"source?"`
	MediaType string `json:"media_type" variants:"file"`
	Name string `json:"name,omitempty" variants:"file?"`
	// Data holds inline file bytes; StripInternal drops it when
	// FileID is set, since the content is then fetchable by ID.
	Data []byte `json:"data,omitempty" variants:"file?"`
	FileID uuid.NullUUID `json:"file_id,omitempty" format:"uuid" variants:"file?"`
	FileName string `json:"file_name" variants:"file-reference"`
	StartLine int `json:"start_line" variants:"file-reference"`
	EndLine int `json:"end_line" variants:"file-reference"`
	// The code content from the diff that was commented on.
	Content string `json:"content" variants:"file-reference"`
	// ProviderMetadata holds provider-specific response metadata
	// (e.g. Anthropic cache control hints) as raw JSON. Internal
	// only: stripped by db2sdk before API responses.
	ProviderMetadata json.RawMessage `json:"provider_metadata,omitempty" typescript:"-"`
	// ProviderExecuted indicates the tool call was executed by
	// the provider (e.g. Anthropic computer use).
	ProviderExecuted bool `json:"provider_executed,omitempty" variants:"tool-call?,tool-result?"`
	// CreatedAt records when this part was produced. Present on
	// tool-call and tool-result parts so the frontend can compute
	// tool execution duration.
	CreatedAt *time.Time `json:"created_at,omitempty" format:"date-time" variants:"tool-call?,tool-result?"`
	// ContextFilePath is the absolute path of a file loaded into
	// the LLM context (e.g. an AGENTS.md instruction file).
	ContextFilePath string `json:"context_file_path" variants:"context-file"`
	// ContextFileContent holds the file content sent to the LLM.
	// Internal only: stripped before API responses to keep
	// payloads small. The backend reads it when building the
	// prompt via partsToMessageParts.
	ContextFileContent string `json:"context_file_content,omitempty" typescript:"-"`
	// ContextFileTruncated indicates the file exceeded the 64KiB
	// instruction file limit and was truncated.
	ContextFileTruncated bool `json:"context_file_truncated,omitempty" variants:"context-file?"`
	// ContextFileAgentID is the workspace agent that provided
	// this context file. Used to detect when the agent changes
	// (e.g. workspace rebuilt) so instruction files can be
	// re-persisted with fresh content.
	ContextFileAgentID uuid.NullUUID `json:"context_file_agent_id,omitempty" format:"uuid" variants:"context-file?"`
	// ContextFileOS is the operating system of the workspace
	// agent. Internal only: used during prompt expansion so
	// the LLM knows the OS even on turns where InsertSystem
	// is not called.
	ContextFileOS string `json:"context_file_os,omitempty" typescript:"-"`
	// ContextFileDirectory is the working directory of the
	// workspace agent. Internal only: same purpose as
	// ContextFileOS.
	ContextFileDirectory string `json:"context_file_directory,omitempty" typescript:"-"`
	// SkillName is the kebab-case name of a discovered skill
	// from the workspace's .agents/skills/ directory.
	SkillName string `json:"skill_name" variants:"skill"`
	// SkillDescription is the short description from the skill's
	// SKILL.md frontmatter.
	SkillDescription string `json:"skill_description,omitempty" variants:"skill?"`
	// SkillDir is the absolute path to the skill directory inside
	// the workspace filesystem. Internal only: used by
	// read_skill/read_skill_file tools to locate skill files.
	SkillDir string `json:"skill_dir,omitempty" typescript:"-"`
	// ContextFileSkillMetaFile is the basename of the skill
	// meta file (e.g. "SKILL.md") at the time of persistence.
	// Internal only: restored on subsequent turns so the
	// read_skill tool uses the correct filename even when the
	// agent configured a non-default value.
	ContextFileSkillMetaFile string `json:"context_file_skill_meta_file,omitempty" typescript:"-"`
}
// StripInternal removes internal-only fields that must not be
// sent to API clients. Call before publishing via REST or SSE.
//
// Note: ArgsDelta and ResultDelta are intentionally preserved.
// They are streaming-only fields consumed by the frontend via
// SSE message_part events (see processStepStream in chatloop).
func (p *ChatMessagePart) StripInternal() {
	// Context-file and skill internals never leave the backend.
	p.ContextFileContent = ""
	p.ContextFileOS = ""
	p.ContextFileDirectory = ""
	p.SkillDir = ""
	p.ContextFileSkillMetaFile = ""

	// Provider metadata is internal-only raw JSON.
	p.ProviderMetadata = nil

	// When the part references an uploaded file by ID, the inline
	// byte copy is redundant; drop it to keep payloads small.
	if p.FileID.Valid {
		p.Data = nil
	}
}
// ChatMessageText builds a text chat message part.
func ChatMessageText(text string) ChatMessagePart {
	part := ChatMessagePart{
		Type: ChatMessagePartTypeText,
		Text: text,
	}
	return part
}
// ChatMessageReasoning builds a reasoning chat message part.
func ChatMessageReasoning(text string) ChatMessagePart {
	part := ChatMessagePart{
		Type: ChatMessagePartTypeReasoning,
		Text: text,
	}
	return part
}
// ChatMessageToolCall builds a tool-call chat message part.
func ChatMessageToolCall(toolCallID, toolName string, args json.RawMessage) ChatMessagePart {
	part := ChatMessagePart{Type: ChatMessagePartTypeToolCall}
	part.ToolCallID = toolCallID
	part.ToolName = toolName
	part.Args = args
	return part
}
// ChatMessageToolResult builds a tool-result chat message part.
// The isMedia flag marks the result as carrying binary media content
// (e.g. a screenshot) so that round-trip reconstruction preserves
// the media type instead of sending raw base64 as text tokens.
func ChatMessageToolResult(toolCallID, toolName string, result json.RawMessage, isError bool, isMedia bool) ChatMessagePart {
	part := ChatMessagePart{Type: ChatMessagePartTypeToolResult}
	part.ToolCallID = toolCallID
	part.ToolName = toolName
	part.Result = result
	part.IsError = isError
	part.IsMedia = isMedia
	return part
}
// ChatMessageFile builds a file chat message part that references an
// uploaded file by ID (content is not inlined).
func ChatMessageFile(fileID uuid.UUID, mediaType string, name string) ChatMessagePart {
	ref := uuid.NullUUID{UUID: fileID, Valid: true}
	return ChatMessagePart{
		Type:      ChatMessagePartTypeFile,
		FileID:    ref,
		MediaType: mediaType,
		Name:      name,
	}
}
// ChatMessageFileReference builds a file-reference chat message part
// pointing at lines [startLine, endLine] of fileName, with the quoted
// code carried in content.
func ChatMessageFileReference(fileName string, startLine, endLine int, content string) ChatMessagePart {
	part := ChatMessagePart{Type: ChatMessagePartTypeFileReference}
	part.FileName = fileName
	part.StartLine = startLine
	part.EndLine = endLine
	part.Content = content
	return part
}
// ChatMessageSource builds a source chat message part.
func ChatMessageSource(sourceID, sourceURL, title string) ChatMessagePart {
	part := ChatMessagePart{Type: ChatMessagePartTypeSource}
	part.SourceID = sourceID
	part.URL = sourceURL
	part.Title = title
	return part
}
// ChatInputPartType represents an input part type for user chat input.
type ChatInputPartType string

const (
	ChatInputPartTypeText ChatInputPartType = "text"
	ChatInputPartTypeFile ChatInputPartType = "file"
	ChatInputPartTypeFileReference ChatInputPartType = "file-reference"
)

// ChatInputPart is a single user input part for creating a chat.
// Which fields are meaningful depends on Type.
type ChatInputPart struct {
	Type ChatInputPartType `json:"type"`
	Text string `json:"text,omitempty"`
	// FileID references a previously uploaded chat file (see
	// UploadChatFileResponse).
	FileID uuid.UUID `json:"file_id,omitempty" format:"uuid"`
	// The following fields are only set when Type is
	// ChatInputPartTypeFileReference.
	FileName string `json:"file_name,omitempty"`
	StartLine int `json:"start_line,omitempty"`
	EndLine int `json:"end_line,omitempty"`
	// The code content from the diff that was commented on.
	Content string `json:"content,omitempty"`
}

// SubmitToolResultsRequest is the body for POST /chats/{id}/tool-results.
type SubmitToolResultsRequest struct {
	Results []ToolResult `json:"results"`
}

// ToolResult is the client's response to a dynamic tool call.
type ToolResult struct {
	// ToolCallID matches the tool_call_id of the tool-call part the
	// client is answering.
	ToolCallID string `json:"tool_call_id"`
	Output json.RawMessage `json:"output"`
	IsError bool `json:"is_error"`
}
// CreateChatRequest is the request to create a new chat.
type CreateChatRequest struct {
	OrganizationID uuid.UUID `json:"organization_id" format:"uuid"`
	// Content is the initial user message, as structured input parts.
	Content []ChatInputPart `json:"content"`
	// SystemPrompt, when set, supplies a system prompt for this chat
	// — NOTE(review): how this interacts with the deployment-level
	// prompt (ChatSystemPromptResponse) is decided server-side;
	// confirm before documenting further.
	SystemPrompt string `json:"system_prompt,omitempty"`
	WorkspaceID *uuid.UUID `json:"workspace_id,omitempty" format:"uuid"`
	ModelConfigID *uuid.UUID `json:"model_config_id,omitempty" format:"uuid"`
	MCPServerIDs []uuid.UUID `json:"mcp_server_ids,omitempty" format:"uuid"`
	Labels map[string]string `json:"labels,omitempty"`
	// UnsafeDynamicTools declares client-executed tools that the
	// LLM can invoke. This API is highly experimental and highly
	// subject to change.
	UnsafeDynamicTools []DynamicTool `json:"unsafe_dynamic_tools,omitempty"`
	PlanMode ChatPlanMode `json:"plan_mode,omitempty"`
	ClientType ChatClientType `json:"client_type,omitempty"`
}

// UpdateChatRequest is the request to update a chat. All fields are
// optional; nil fields leave the corresponding chat attribute
// unchanged.
type UpdateChatRequest struct {
	Title *string `json:"title,omitempty"`
	Archived *bool `json:"archived,omitempty"`
	WorkspaceID *uuid.UUID `json:"workspace_id,omitempty" format:"uuid"`
	// PinOrder controls the chat's pinned state and position.
	// - nil: no change to pin state.
	// - 0: unpin the chat.
	// - >0 (chat is unpinned): pin the chat, appending it to
	// the end of the pinned list. The specific value is
	// ignored; the server assigns the next available position.
	// - >0 (chat is already pinned): move the chat to the
	// requested position, shifting neighbors as needed. The
	// value is clamped to [1, pinned_count].
	PinOrder *int32 `json:"pin_order,omitempty"`
	Labels *map[string]string `json:"labels,omitempty"`
	// PlanMode switches the chat's persistent plan mode.
	// nil: no change, ptr to "plan": enable, ptr to "": clear.
	PlanMode *ChatPlanMode `json:"plan_mode,omitempty"`
}
// ChatBusyBehavior controls what happens when a user sends a message
// while the chat is already processing.
type ChatBusyBehavior string

const (
	// ChatBusyBehaviorQueue queues the message for processing after
	// the current run finishes.
	ChatBusyBehaviorQueue ChatBusyBehavior = "queue"
	// ChatBusyBehaviorInterrupt queues the message and interrupts
	// the active run. The partial assistant response is persisted
	// before the queued message is promoted, preserving correct
	// conversation order.
	ChatBusyBehaviorInterrupt ChatBusyBehavior = "interrupt"
)

// ChatPlanMode represents the persistent plan mode state of a chat.
// The empty string means plan mode is off (see the PlanMode pointer
// semantics on UpdateChatRequest).
type ChatPlanMode string

const (
	// ChatPlanModePlan activates plan mode for the chat.
	ChatPlanModePlan ChatPlanMode = "plan"
)

// CreateChatMessageRequest is the request to add a message to a chat.
type CreateChatMessageRequest struct {
	Content []ChatInputPart `json:"content"`
	ModelConfigID *uuid.UUID `json:"model_config_id,omitempty" format:"uuid"`
	// MCPServerIDs is a pointer-to-slice so that nil (no change) can
	// be distinguished from an explicit empty list.
	MCPServerIDs *[]uuid.UUID `json:"mcp_server_ids,omitempty" format:"uuid"`
	BusyBehavior ChatBusyBehavior `json:"busy_behavior,omitempty" enums:"queue,interrupt"`
	// PlanMode switches the chat's persistent plan mode.
	// nil: no change, ptr to "plan": enable, ptr to "": clear.
	PlanMode *ChatPlanMode `json:"plan_mode,omitempty"`
}

// EditChatMessageRequest is the request to edit a user message in a chat.
// Content presumably replaces the message's prior content wholesale —
// TODO confirm against the edit handler.
type EditChatMessageRequest struct {
	Content []ChatInputPart `json:"content"`
}
// CreateChatMessageResponse is the response from adding a message to a chat.
// NOTE(review): the field shapes suggest exactly one of Message or
// QueuedMessage is set, mirrored by Queued — confirm against the handler.
type CreateChatMessageResponse struct {
	// Message is set when the message was accepted for immediate
	// processing.
	Message *ChatMessage `json:"message,omitempty"`
	// QueuedMessage is set when the message was queued instead
	// (see ChatBusyBehavior).
	QueuedMessage *ChatQueuedMessage `json:"queued_message,omitempty"`
	Queued bool `json:"queued"`
	Warnings []string `json:"warnings,omitempty"`
}

// EditChatMessageResponse is the response from editing a message in a chat.
// Edits are always synchronous (no queueing), so the message is returned
// directly.
type EditChatMessageResponse struct {
	Message ChatMessage `json:"message"`
	Warnings []string `json:"warnings,omitempty"`
}

// UploadChatFileResponse is the response from uploading a chat file.
// The returned ID can be referenced from ChatInputPart.FileID.
type UploadChatFileResponse struct {
	ID uuid.UUID `json:"id" format:"uuid"`
}

// ChatMessagesResponse contains the messages and queued messages for a chat.
type ChatMessagesResponse struct {
	Messages []ChatMessage `json:"messages"`
	QueuedMessages []ChatQueuedMessage `json:"queued_messages"`
	// HasMore indicates additional (older or newer, depending on the
	// query) messages exist beyond this page — TODO confirm direction.
	HasMore bool `json:"has_more"`
}
// ChatModelProviderUnavailableReason explains why a provider cannot be used.
type ChatModelProviderUnavailableReason string

const (
	// ChatModelProviderUnavailableMissingAPIKey: no API key is
	// configured for the provider at the deployment level.
	ChatModelProviderUnavailableMissingAPIKey ChatModelProviderUnavailableReason = "missing_api_key"
	// ChatModelProviderUnavailableFetchFailed: the model catalog
	// could not be fetched from the provider.
	ChatModelProviderUnavailableFetchFailed ChatModelProviderUnavailableReason = "fetch_failed"
	// #nosec G101
	ChatModelProviderUnavailableReasonUserAPIKeyRequired ChatModelProviderUnavailableReason = "user_api_key_required"
)

// ChatModel represents a model in the chat model catalog.
type ChatModel struct {
	// ID uniquely identifies the model within the catalog —
	// NOTE(review): relationship to Provider+Model is not visible
	// here; confirm its format before parsing it.
	ID string `json:"id"`
	Provider string `json:"provider"`
	Model string `json:"model"`
	DisplayName string `json:"display_name"`
}

// ChatModelProvider represents provider availability and model results.
type ChatModelProvider struct {
	Provider string `json:"provider"`
	Available bool `json:"available"`
	// UnavailableReason is set only when Available is false.
	UnavailableReason ChatModelProviderUnavailableReason `json:"unavailable_reason,omitempty"`
	Models []ChatModel `json:"models"`
}

// ChatModelsResponse is the catalog returned from chat model discovery.
type ChatModelsResponse struct {
	Providers []ChatModelProvider `json:"providers"`
}
// ChatSystemPromptResponse is the response body for the chat system prompt
// configuration endpoint.
type ChatSystemPromptResponse struct {
	// SystemPrompt is the deployment-configured custom prompt.
	SystemPrompt string `json:"system_prompt"`
	// IncludeDefaultSystemPrompt controls whether DefaultSystemPrompt
	// is combined with the custom one — exact composition is decided
	// server-side; confirm before relying on a specific ordering.
	IncludeDefaultSystemPrompt bool `json:"include_default_system_prompt"`
	DefaultSystemPrompt string `json:"default_system_prompt"`
}

// UpdateChatSystemPromptRequest is the request body for updating the chat
// system prompt configuration.
type UpdateChatSystemPromptRequest struct {
	SystemPrompt string `json:"system_prompt"`
	// IncludeDefaultSystemPrompt is optional; nil leaves the current
	// setting unchanged.
	IncludeDefaultSystemPrompt *bool `json:"include_default_system_prompt,omitempty"`
}

// ChatPlanModeInstructionsResponse is the response body for the
// plan mode instructions configuration endpoint.
type ChatPlanModeInstructionsResponse struct {
	PlanModeInstructions string `json:"plan_mode_instructions"`
}

// UpdateChatPlanModeInstructionsRequest is the request body for
// updating the plan mode instructions configuration.
type UpdateChatPlanModeInstructionsRequest struct {
	PlanModeInstructions string `json:"plan_mode_instructions"`
}
// ChatAgentModelOverrideContext identifies which chat or subagent context
// a deployment override applies to.
type ChatAgentModelOverrideContext string

const (
	ChatAgentModelOverrideContextGeneral ChatAgentModelOverrideContext = "general"
	ChatAgentModelOverrideContextExplore ChatAgentModelOverrideContext = "explore"
)

// Valid reports whether the override context is one of the supported
// values. It checks membership against the canonical list so the two
// cannot drift apart.
func (c ChatAgentModelOverrideContext) Valid() bool {
	for _, known := range AllChatAgentModelOverrideContexts() {
		if c == known {
			return true
		}
	}
	return false
}

// AllChatAgentModelOverrideContexts returns all supported override contexts.
func AllChatAgentModelOverrideContexts() []ChatAgentModelOverrideContext {
	return []ChatAgentModelOverrideContext{
		ChatAgentModelOverrideContextGeneral,
		ChatAgentModelOverrideContextExplore,
	}
}
// ChatAgentModelOverrideResponse is the response body for the chat agent
// model override configuration endpoint.
type ChatAgentModelOverrideResponse struct {
	Context ChatAgentModelOverrideContext `json:"context"`
	// ModelConfigID is a string rather than uuid.UUID so a malformed
	// stored value can still be surfaced — NOTE(review): inferred
	// from IsMalformed; confirm against the endpoint.
	ModelConfigID string `json:"model_config_id"`
	IsMalformed bool `json:"is_malformed"`
}

// UpdateChatAgentModelOverrideRequest is the request body for updating the
// chat agent model override configuration endpoint.
type UpdateChatAgentModelOverrideRequest struct {
	ModelConfigID string `json:"model_config_id"`
}

// UserChatCustomPrompt is the request and response body for the
// user chat custom prompt configuration endpoint.
type UserChatCustomPrompt struct {
	CustomPrompt string `json:"custom_prompt"`
}

// UserChatCompactionThreshold is a user's per-model chat compaction
// threshold override. Stored under the key produced by
// CompactionThresholdKey.
type UserChatCompactionThreshold struct {
	ModelConfigID uuid.UUID `json:"model_config_id" format:"uuid"`
	// ThresholdPercent is a percentage in [0, 100] (see
	// UpdateUserChatCompactionThresholdRequest validation).
	ThresholdPercent int32 `json:"threshold_percent"`
}

// UserChatCompactionThresholds wraps the user's per-model chat
// compaction threshold overrides.
type UserChatCompactionThresholds struct {
	Thresholds []UserChatCompactionThreshold `json:"thresholds"`
}

// UpdateUserChatCompactionThresholdRequest sets a user's per-model
// chat compaction threshold override.
type UpdateUserChatCompactionThresholdRequest struct {
	ThresholdPercent int32 `json:"threshold_percent" validate:"min=0,max=100"`
}

// ChatDesktopEnabledResponse is the response for getting the desktop setting.
type ChatDesktopEnabledResponse struct {
	EnableDesktop bool `json:"enable_desktop"`
}

// UpdateChatDesktopEnabledRequest is the request to update the desktop setting.
type UpdateChatDesktopEnabledRequest struct {
	EnableDesktop bool `json:"enable_desktop"`
}
// ChatDebugLoggingAdminSettings describes the runtime admin setting
// that allows users to opt into chat debug logging.
type ChatDebugLoggingAdminSettings struct {
	AllowUsers bool `json:"allow_users"`
	// ForcedByDeployment indicates the setting is pinned by
	// deployment configuration and cannot be changed at runtime —
	// NOTE(review): inferred from the name; confirm against the
	// settings handler.
	ForcedByDeployment bool `json:"forced_by_deployment"`
}

// UserChatDebugLoggingSettings describes whether debug logging is
// active for the current user and whether the user may control it.
type UserChatDebugLoggingSettings struct {
	DebugLoggingEnabled bool `json:"debug_logging_enabled"`
	UserToggleAllowed bool `json:"user_toggle_allowed"`
	ForcedByDeployment bool `json:"forced_by_deployment"`
}

// UpdateChatDebugLoggingAllowUsersRequest is the admin request to
// toggle whether users may opt into chat debug logging.
type UpdateChatDebugLoggingAllowUsersRequest struct {
	AllowUsers bool `json:"allow_users"`
}

// UpdateUserChatDebugLoggingRequest is the per-user request to
// opt into or out of chat debug logging.
type UpdateUserChatDebugLoggingRequest struct {
	DebugLoggingEnabled bool `json:"debug_logging_enabled"`
}
// ChatDebugStatus enumerates the lifecycle states shared by debug
// runs and steps. These values must match the literals used in
// FinalizeStaleChatDebugRows and all insert/update callers.
type ChatDebugStatus string

const (
	ChatDebugStatusInProgress ChatDebugStatus = "in_progress"
	ChatDebugStatusCompleted ChatDebugStatus = "completed"
	ChatDebugStatusError ChatDebugStatus = "error"
	ChatDebugStatusInterrupted ChatDebugStatus = "interrupted"
)

// ChatDebugTerminalStatuses returns the statuses that represent a
// finished lifecycle. The SQL query FinalizeStaleChatDebugRows uses
// a NOT IN list that must match these exactly. A test in
// coderd/database asserts this alignment at CI time.
//
// The terminal set is derived from AllChatDebugStatuses by excluding
// the one non-terminal state, so the two cannot drift apart.
func ChatDebugTerminalStatuses() []ChatDebugStatus {
	terminal := make([]ChatDebugStatus, 0, len(AllChatDebugStatuses)-1)
	for _, status := range AllChatDebugStatuses {
		if status != ChatDebugStatusInProgress {
			terminal = append(terminal, status)
		}
	}
	return terminal
}

// AllChatDebugStatuses contains every ChatDebugStatus value.
// Update this when adding new constants above.
var AllChatDebugStatuses = []ChatDebugStatus{
	ChatDebugStatusInProgress,
	ChatDebugStatusCompleted,
	ChatDebugStatusError,
	ChatDebugStatusInterrupted,
}
// ChatDebugRunKind labels the operation that produced the debug
// run. Each value corresponds to a distinct call-site in chatd.
type ChatDebugRunKind string

const (
	// ChatDebugRunKindChatTurn is a regular user-visible chat turn.
	ChatDebugRunKindChatTurn ChatDebugRunKind = "chat_turn"
	// ChatDebugRunKindTitleGeneration is a background run that
	// generates a chat title.
	ChatDebugRunKindTitleGeneration ChatDebugRunKind = "title_generation"
	// ChatDebugRunKindQuickgen is a quick-generation run
	// (presumably a one-shot completion — confirm against chatd).
	ChatDebugRunKindQuickgen ChatDebugRunKind = "quickgen"
	// ChatDebugRunKindCompaction is a history-compaction run.
	ChatDebugRunKindCompaction ChatDebugRunKind = "compaction"
)

// AllChatDebugRunKinds contains every ChatDebugRunKind value.
// Update this when adding new constants above.
var AllChatDebugRunKinds = []ChatDebugRunKind{
	ChatDebugRunKindChatTurn,
	ChatDebugRunKindTitleGeneration,
	ChatDebugRunKindQuickgen,
	ChatDebugRunKindCompaction,
}
// ChatDebugStepOperation labels the model interaction type for a
// debug step.
type ChatDebugStepOperation string

const (
	// ChatDebugStepOperationStream is a streaming model call.
	ChatDebugStepOperationStream ChatDebugStepOperation = "stream"
	// ChatDebugStepOperationGenerate is a non-streaming
	// (single-response) model call.
	ChatDebugStepOperationGenerate ChatDebugStepOperation = "generate"
)

// AllChatDebugStepOperations contains every ChatDebugStepOperation
// value. Update this when adding new constants above.
var AllChatDebugStepOperations = []ChatDebugStepOperation{
	ChatDebugStepOperationStream,
	ChatDebugStepOperationGenerate,
}
// ChatDebugRunSummary is a lightweight run entry for list endpoints.
// It carries the same top-level fields as ChatDebugRun but omits the
// step history.
type ChatDebugRunSummary struct {
	ID     uuid.UUID        `json:"id" format:"uuid"`
	ChatID uuid.UUID        `json:"chat_id" format:"uuid"`
	Kind   ChatDebugRunKind `json:"kind"`
	Status ChatDebugStatus  `json:"status"`
	// Provider and Model are nil when the run never resolved a model
	// (e.g. it errored before selection — TODO confirm in chatd).
	Provider *string `json:"provider,omitempty"`
	Model    *string `json:"model,omitempty"`
	// Summary is free-form, run-kind-specific metadata.
	Summary   map[string]any `json:"summary"`
	StartedAt time.Time      `json:"started_at" format:"date-time"`
	UpdatedAt time.Time      `json:"updated_at" format:"date-time"`
	// FinishedAt is nil while the run is still in progress.
	FinishedAt *time.Time `json:"finished_at,omitempty" format:"date-time"`
}
// ChatDebugRun is the detailed run response returned by the run-detail
// endpoint. It includes the same summary fields as ChatDebugRunSummary
// along with the full step history for the run.
type ChatDebugRun struct {
	ID     uuid.UUID `json:"id" format:"uuid"`
	ChatID uuid.UUID `json:"chat_id" format:"uuid"`
	// RootChatID and ParentChatID are presumably set for sub-chats to
	// link back to their ancestry; nil for top-level chats — confirm
	// against the chat hierarchy model.
	RootChatID    *uuid.UUID `json:"root_chat_id,omitempty" format:"uuid"`
	ParentChatID  *uuid.UUID `json:"parent_chat_id,omitempty" format:"uuid"`
	ModelConfigID *uuid.UUID `json:"model_config_id,omitempty" format:"uuid"`
	// TriggerMessageID is the message that initiated the run;
	// HistoryTipMessageID is the newest message included in the
	// history sent to the model (assumptions from field names — TODO
	// confirm at the insert call-sites).
	TriggerMessageID    *int64           `json:"trigger_message_id,omitempty"`
	HistoryTipMessageID *int64           `json:"history_tip_message_id,omitempty"`
	Kind                ChatDebugRunKind `json:"kind"`
	Status              ChatDebugStatus  `json:"status"`
	Provider            *string          `json:"provider,omitempty"`
	Model               *string          `json:"model,omitempty"`
	// Summary is free-form, run-kind-specific metadata.
	Summary   map[string]any `json:"summary"`
	StartedAt time.Time      `json:"started_at" format:"date-time"`
	UpdatedAt time.Time      `json:"updated_at" format:"date-time"`
	// FinishedAt is nil while the run is still in progress.
	FinishedAt *time.Time `json:"finished_at,omitempty" format:"date-time"`
	// Steps is the ordered step history for the run.
	Steps []ChatDebugStep `json:"steps"`
}
// ChatDebugStep is a single step within a debug run: one model
// interaction together with its request, response, and outcome.
type ChatDebugStep struct {
	ID     uuid.UUID `json:"id" format:"uuid"`
	RunID  uuid.UUID `json:"run_id" format:"uuid"`
	ChatID uuid.UUID `json:"chat_id" format:"uuid"`
	// StepNumber orders steps within a run.
	StepNumber int32                  `json:"step_number"`
	Operation  ChatDebugStepOperation `json:"operation"`
	Status     ChatDebugStatus        `json:"status"`
	// HistoryTipMessageID is presumably the newest message included
	// in this step's prompt; AssistantMessageID the message produced
	// by it — confirm at the insert call-sites.
	HistoryTipMessageID *int64 `json:"history_tip_message_id,omitempty"`
	AssistantMessageID  *int64 `json:"assistant_message_id,omitempty"`
	// NormalizedRequest/NormalizedResponse hold the provider-agnostic
	// forms of the model call; their schemas are defined by chatd.
	NormalizedRequest  map[string]any `json:"normalized_request"`
	NormalizedResponse map[string]any `json:"normalized_response,omitempty"`
	// Usage holds token/billing counters when the provider reports them.
	Usage map[string]any `json:"usage,omitempty"`
	// Attempts records each try, including retries, for this step.
	Attempts []map[string]any `json:"attempts"`
	// Error is populated when Status is the error state.
	Error    map[string]any `json:"error,omitempty"`
	Metadata map[string]any `json:"metadata"`
	StartedAt time.Time     `json:"started_at" format:"date-time"`
	UpdatedAt time.Time     `json:"updated_at" format:"date-time"`
	// FinishedAt is nil while the step is still in progress.
	FinishedAt *time.Time `json:"finished_at,omitempty" format:"date-time"`
}
// DefaultChatWorkspaceTTL is the default TTL for chat workspaces.
// Zero means disabled — the template's own autostop setting applies.
// Deliberately untyped so it can be returned as a time.Duration by
// ParseChatWorkspaceTTL.
const DefaultChatWorkspaceTTL = 0

// DefaultChatAutoArchiveDays is the default auto-archive window, in
// days, applied when no site config row exists. Zero disables
// auto-archival.
const DefaultChatAutoArchiveDays int32 = 0
// ChatWorkspaceTTLResponse is the response for getting the chat
// workspace TTL setting.
type ChatWorkspaceTTLResponse struct {
	// WorkspaceTTLMillis is the workspace TTL in milliseconds.
	// Zero means disabled — the template's own autostop setting applies.
	WorkspaceTTLMillis int64 `json:"workspace_ttl_ms"`
}

// UpdateChatWorkspaceTTLRequest is the request to update the chat
// workspace TTL setting. It mirrors ChatWorkspaceTTLResponse.
type UpdateChatWorkspaceTTLRequest struct {
	// WorkspaceTTLMillis is the workspace TTL in milliseconds.
	// Zero means disabled — the template's own autostop setting applies.
	WorkspaceTTLMillis int64 `json:"workspace_ttl_ms"`
}
// ChatRetentionDaysResponse contains the current chat retention setting.
type ChatRetentionDaysResponse struct {
	// RetentionDays is the retention period in days.
	RetentionDays int32 `json:"retention_days"`
}

// UpdateChatRetentionDaysRequest is a request to update the chat
// retention period.
type UpdateChatRetentionDaysRequest struct {
	// RetentionDays is the new retention period in days.
	RetentionDays int32 `json:"retention_days"`
}

// ChatAutoArchiveDaysResponse contains the current chat auto-archive setting.
type ChatAutoArchiveDaysResponse struct {
	// AutoArchiveDays is the auto-archive window in days. Zero
	// disables auto-archival (see DefaultChatAutoArchiveDays).
	AutoArchiveDays int32 `json:"auto_archive_days"`
}

// UpdateChatAutoArchiveDaysRequest is a request to update the chat
// auto-archive period.
type UpdateChatAutoArchiveDaysRequest struct {
	// AutoArchiveDays is the new auto-archive window in days.
	AutoArchiveDays int32 `json:"auto_archive_days"`
}
// ParseChatWorkspaceTTL parses a stored TTL string into a duration.
// An empty string yields DefaultChatWorkspaceTTL; malformed or
// negative values produce an error.
func ParseChatWorkspaceTTL(s string) (time.Duration, error) {
	if s == "" {
		return DefaultChatWorkspaceTTL, nil
	}
	parsed, parseErr := time.ParseDuration(s)
	switch {
	case parseErr != nil:
		return 0, xerrors.Errorf("invalid duration %q: %w", s, parseErr)
	case parsed < 0:
		return 0, xerrors.New("duration must be non-negative")
	default:
		return parsed, nil
	}
}
// ChatTemplateAllowlist is the request and response body for the
// chat template allowlist configuration endpoint. An empty list
// means all templates are allowed.
type ChatTemplateAllowlist struct {
	// TemplateIDs lists the allowed template IDs as strings
	// (presumably UUIDs — confirm against the endpoint's validation).
	TemplateIDs []string `json:"template_ids"`
}
// ChatProviderConfigSource describes how a provider entry is sourced.
type ChatProviderConfigSource string

const (
	// ChatProviderConfigSourceDatabase is a provider persisted via
	// the admin configuration endpoints.
	ChatProviderConfigSourceDatabase ChatProviderConfigSource = "database"
	// ChatProviderConfigSourceEnvPreset is a provider preset supplied
	// through the environment.
	ChatProviderConfigSourceEnvPreset ChatProviderConfigSource = "env_preset"
	// ChatProviderConfigSourceSupported is a known-but-unconfigured
	// provider surfaced from the built-in supported list.
	ChatProviderConfigSourceSupported ChatProviderConfigSource = "supported"
)
// ChatProviderConfig is an admin-managed provider configuration.
// API keys themselves are never returned; only the HasAPIKey flag
// reveals whether one is stored.
type ChatProviderConfig struct {
	ID uuid.UUID `json:"id" format:"uuid"`
	// Provider is the provider identifier (e.g. vendor slug);
	// DisplayName is its human-readable label.
	Provider    string `json:"provider"`
	DisplayName string `json:"display_name"`
	Enabled     bool   `json:"enabled"`
	// HasAPIKey reports whether a central API key is stored.
	HasAPIKey            bool `json:"has_api_key"`
	CentralAPIKeyEnabled bool `json:"central_api_key_enabled"`
	// AllowUserAPIKey permits users to supply their own keys;
	// AllowCentralAPIKeyFallback permits falling back to the central
	// key when a user has none.
	AllowUserAPIKey            bool `json:"allow_user_api_key"`
	AllowCentralAPIKeyFallback bool `json:"allow_central_api_key_fallback"`
	// BaseURL overrides the provider's default endpoint when set.
	BaseURL string                   `json:"base_url,omitempty"`
	Source  ChatProviderConfigSource `json:"source"`
	// Timestamps may be zero for non-database sources (omitempty).
	CreatedAt time.Time `json:"created_at,omitempty" format:"date-time"`
	UpdatedAt time.Time `json:"updated_at,omitempty" format:"date-time"`
}
// CreateChatProviderConfigRequest creates a chat provider config.
// Pointer booleans distinguish "not provided" (nil) from an explicit
// false, letting the server apply defaults.
type CreateChatProviderConfigRequest struct {
	Provider    string `json:"provider"`
	DisplayName string `json:"display_name,omitempty"`
	// APIKey is the central API key to store; empty means none.
	APIKey string `json:"api_key,omitempty"`
	// BaseURL optionally overrides the provider's default endpoint.
	BaseURL                    string `json:"base_url,omitempty"`
	Enabled                    *bool  `json:"enabled,omitempty"`
	CentralAPIKeyEnabled       *bool  `json:"central_api_key_enabled,omitempty"`
	AllowUserAPIKey            *bool  `json:"allow_user_api_key,omitempty"`
	AllowCentralAPIKeyFallback *bool  `json:"allow_central_api_key_fallback,omitempty"`
}

// UpdateChatProviderConfigRequest updates a chat provider config.
// All fields are optional partial updates; note APIKey and BaseURL
// are pointers here (unlike the create request) so a nil leaves the
// stored value untouched while a non-nil value replaces it.
type UpdateChatProviderConfigRequest struct {
	DisplayName                string  `json:"display_name,omitempty"`
	APIKey                     *string `json:"api_key,omitempty"`
	BaseURL                    *string `json:"base_url,omitempty"`
	Enabled                    *bool   `json:"enabled,omitempty"`
	CentralAPIKeyEnabled       *bool   `json:"central_api_key_enabled,omitempty"`
	AllowUserAPIKey            *bool   `json:"allow_user_api_key,omitempty"`
	AllowCentralAPIKeyFallback *bool   `json:"allow_central_api_key_fallback,omitempty"`
}
// UserChatProviderConfig is a summary of a provider that allows
// user-supplied keys, as seen from the current user's perspective.
// It exposes only boolean key-presence flags, never key material.
type UserChatProviderConfig struct {
	ProviderID  uuid.UUID `json:"provider_id" format:"uuid"`
	Provider    string    `json:"provider"`
	DisplayName string    `json:"display_name"`
	// HasUserAPIKey reports whether this user has stored a key.
	HasUserAPIKey bool `json:"has_user_api_key"`
	// HasCentralAPIKeyFallback reports whether a central key can be
	// used when the user has none.
	HasCentralAPIKeyFallback bool `json:"has_central_api_key_fallback"`
}

// CreateUserChatProviderKeyRequest creates or replaces a user's API key
// for a provider.
type CreateUserChatProviderKeyRequest struct {
	// APIKey is the user's key for the provider.
	APIKey string `json:"api_key"`
}
// ChatModelConfig is an admin-managed model configuration.
type ChatModelConfig struct {
	ID       uuid.UUID `json:"id" format:"uuid"`
	Provider string    `json:"provider"`
	// Model is the provider-specific model identifier.
	Model       string `json:"model"`
	DisplayName string `json:"display_name"`
	Enabled     bool   `json:"enabled"`
	// IsDefault marks the model used when a chat doesn't specify one.
	IsDefault bool `json:"is_default"`
	// ContextLimit is the model's context window size (presumably in
	// tokens — confirm at the usage sites).
	ContextLimit int64 `json:"context_limit"`
	// CompressionThreshold presumably triggers history compaction
	// when context usage crosses it — confirm in chatd.
	CompressionThreshold int32 `json:"compression_threshold"`
	// ModelConfig carries optional per-call tuning; nil means
	// provider defaults.
	ModelConfig *ChatModelCallConfig `json:"model_config,omitempty"`
	CreatedAt   time.Time            `json:"created_at" format:"date-time"`
	UpdatedAt   time.Time            `json:"updated_at" format:"date-time"`
}
// ChatModelProviderOptions contains typed provider-specific options.
// At most one member is expected to be relevant for a given model;
// each pointer is nil when that provider's options are unset.
//
// Note: Azure models use the `openai` options shape.
// Note: Bedrock models use the `anthropic` options shape.
type ChatModelProviderOptions struct {
	OpenAI       *ChatModelOpenAIProviderOptions       `json:"openai,omitempty"`
	Anthropic    *ChatModelAnthropicProviderOptions    `json:"anthropic,omitempty"`
	Google       *ChatModelGoogleProviderOptions       `json:"google,omitempty"`
	OpenAICompat *ChatModelOpenAICompatProviderOptions `json:"openaicompat,omitempty"`
	OpenRouter   *ChatModelOpenRouterProviderOptions   `json:"openrouter,omitempty"`
	Vercel       *ChatModelVercelProviderOptions       `json:"vercel,omitempty"`
}
// ChatModelOpenAIProviderOptions configures OpenAI provider behavior.
//
// Per-field documentation lives in the `description` struct tags,
// which presumably drive an admin UI; `hidden:"true"` fields are not
// surfaced there. All fields are optional — nil/empty means the
// provider default applies. The `enum` tags constrain allowed values
// and should track OpenAI's API as it evolves.
type ChatModelOpenAIProviderOptions struct {
	Include             []string         `json:"include,omitempty" description:"Model names to include in discovery" hidden:"true"`
	Instructions        *string          `json:"instructions,omitempty" description:"System-level instructions prepended to the conversation" hidden:"true"`
	LogitBias           map[string]int64 `json:"logit_bias,omitempty" description:"Token IDs mapped to bias values from -100 to 100" hidden:"true"`
	LogProbs            *bool            `json:"log_probs,omitempty" description:"Whether to return log probabilities of output tokens" hidden:"true"`
	TopLogProbs         *int64           `json:"top_log_probs,omitempty" description:"Number of most likely tokens to return log probabilities for" hidden:"true"`
	MaxToolCalls        *int64           `json:"max_tool_calls,omitempty" description:"Maximum number of tool calls per response"`
	ParallelToolCalls   *bool            `json:"parallel_tool_calls,omitempty" description:"Whether the model may make multiple tool calls in parallel"`
	User                *string          `json:"user,omitempty" description:"Unique identifier for the end user for abuse monitoring" hidden:"true"`
	ReasoningEffort     *string          `json:"reasoning_effort,omitempty" description:"Controls the level of reasoning effort" enum:"none,minimal,low,medium,high,xhigh"`
	ReasoningSummary    *string          `json:"reasoning_summary,omitempty" description:"Controls whether reasoning tokens are summarized in the response" enum:"auto,concise,detailed"`
	MaxCompletionTokens *int64           `json:"max_completion_tokens,omitempty" description:"Upper bound on tokens the model may generate"`
	TextVerbosity       *string          `json:"text_verbosity,omitempty" description:"Controls the verbosity of the text response" enum:"low,medium,high"`
	Prediction          map[string]any   `json:"prediction,omitempty" description:"Predicted output content to speed up responses" hidden:"true"`
	Store               *bool            `json:"store,omitempty" description:"Whether to store the response on OpenAI for later retrieval via the API and dashboard logs"`
	Metadata            map[string]any   `json:"metadata,omitempty" description:"Arbitrary metadata to attach to the request" hidden:"true"`
	PromptCacheKey      *string          `json:"prompt_cache_key,omitempty" description:"Key for enabling cross-request prompt caching"`
	SafetyIdentifier    *string          `json:"safety_identifier,omitempty" description:"Developer-specific safety identifier for the request" hidden:"true"`
	ServiceTier         *string          `json:"service_tier,omitempty" description:"Latency tier to use for processing the request" enum:"auto,default,flex,scale,priority"`
	StructuredOutputs   *bool            `json:"structured_outputs,omitempty" description:"Whether to enable structured JSON output mode" hidden:"true"`
	StrictJSONSchema    *bool            `json:"strict_json_schema,omitempty" description:"Whether to enforce strict adherence to the JSON schema" hidden:"true"`
	WebSearchEnabled    *bool            `json:"web_search_enabled,omitempty" description:"Enable OpenAI web search tool for grounding responses with real-time information"`
	SearchContextSize   *string          `json:"search_context_size,omitempty" description:"Amount of search context to use" enum:"low,medium,high"`
	AllowedDomains      []string         `json:"allowed_domains,omitempty" label:"Web Search: Allowed Domains" description:"Restrict web search to these domains"`
}
// ChatModelAnthropicThinkingOptions configures Anthropic thinking budget.
type ChatModelAnthropicThinkingOptions struct {
	// BudgetTokens caps the tokens spent on extended thinking; nil
	// leaves the provider default.
	BudgetTokens *int64 `json:"budget_tokens,omitempty" description:"Maximum number of tokens the model may use for thinking"`
}

// ChatModelAnthropicProviderOptions configures Anthropic provider behavior.
//
// Per-field documentation lives in the `description` struct tags; all
// fields are optional and nil/empty defers to provider defaults. Note
// the documented mutual exclusion between AllowedDomains and
// BlockedDomains (enforcement location not visible here — presumably
// validated server-side; confirm).
type ChatModelAnthropicProviderOptions struct {
	SendReasoning          *bool                              `json:"send_reasoning,omitempty" description:"Whether to include reasoning content in the response"`
	Thinking               *ChatModelAnthropicThinkingOptions `json:"thinking,omitempty" description:"Configuration for extended thinking"`
	Effort                 *string                            `json:"effort,omitempty" label:"Reasoning Effort" description:"Controls the level of reasoning effort" enum:"low,medium,high,xhigh,max"`
	DisableParallelToolUse *bool                              `json:"disable_parallel_tool_use,omitempty" description:"Whether to disable parallel tool execution"`
	WebSearchEnabled       *bool                              `json:"web_search_enabled,omitempty" description:"Enable Anthropic web search tool for grounding responses with real-time information"`
	AllowedDomains         []string                           `json:"allowed_domains,omitempty" label:"Web Search: Allowed Domains" description:"Restrict web search to these domains (cannot be used with blocked_domains)"`
	BlockedDomains         []string                           `json:"blocked_domains,omitempty" label:"Web Search: Blocked Domains" description:"Block web search on these domains (cannot be used with allowed_domains)"`
}
// ChatModelGoogleThinkingConfig configures Google thinking behavior.