-
Notifications
You must be signed in to change notification settings - Fork 3.4k
Expand file tree
/
Copy pathfilesystem.go
More file actions
587 lines (493 loc) · 15.6 KB
/
filesystem.go
File metadata and controls
587 lines (493 loc) · 15.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
package filesystem
import (
"context"
"errors"
"image"
"io"
"mime/multipart"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"github.com/disintegration/imaging"
"github.com/fatih/color"
"github.com/gabriel-vasile/mimetype"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/fileblob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/list"
// explicit webp decoder because disintegration/imaging does not support webp
_ "golang.org/x/image/webp"
)
// note: the same as blob.ErrNotFound for backward compatibility with earlier versions
var ErrNotFound = blob.ErrNotFound

// metadataOriginalName is the blob metadata key under which the
// uploaded file's original name is stored (see UploadFile/UploadMultipart).
const metadataOriginalName = "original-filename"

// System is a thin wrapper around a blob.Bucket (local dir or S3)
// with a bound context used for all bucket operations.
type System struct {
	ctx    context.Context
	bucket *blob.Bucket
}
// NewS3 initializes a new S3 filesystem instance.
//
// NB! Make sure to call `Close()` after you are done working with it.
func NewS3(
	bucketName string,
	region string,
	endpoint string,
	accessKey string,
	secretKey string,
	s3ForcePathStyle bool,
) (*System, error) {
	drv, err := s3blob.New(&s3.S3{
		Bucket:       bucketName,
		Region:       region,
		Endpoint:     endpoint,
		AccessKey:    accessKey,
		SecretKey:    secretKey,
		UsePathStyle: s3ForcePathStyle,
	})
	if err != nil {
		return nil, err
	}

	return &System{
		ctx:    context.Background(), // default context
		bucket: blob.NewBucket(drv),
	}, nil
}
// NewLocal initializes a new local filesystem instance.
//
// NB! Make sure to call `Close()` after you are done working with it.
func NewLocal(dirPath string) (*System, error) {
	// ensure that the root directory exists
	if err := os.MkdirAll(dirPath, os.ModePerm); err != nil {
		return nil, err
	}

	drv, err := fileblob.New(dirPath, &fileblob.Options{NoTempDir: true})
	if err != nil {
		return nil, err
	}

	return &System{
		ctx:    context.Background(), // default context
		bucket: blob.NewBucket(drv),
	}, nil
}
// SetContext assigns the specified context to the current filesystem.
func (s *System) SetContext(ctx context.Context) {
	s.ctx = ctx
}
// Close releases any resources used for the related filesystem.
func (s *System) Close() error {
	return s.bucket.Close()
}
// Exists checks if file with fileKey path exists or not.
func (s *System) Exists(fileKey string) (bool, error) {
	return s.bucket.Exists(s.ctx, fileKey)
}
// Attributes returns the attributes for the file with fileKey path.
//
// If the file doesn't exist it returns ErrNotFound.
func (s *System) Attributes(fileKey string) (*blob.Attributes, error) {
	return s.bucket.Attributes(s.ctx, fileKey)
}
// GetReader returns a file content reader for the given fileKey.
//
// NB! Make sure to call Close() on the file after you are done working with it.
//
// If the file doesn't exist returns ErrNotFound.
func (s *System) GetReader(fileKey string) (*blob.Reader, error) {
	return s.bucket.NewReader(s.ctx, fileKey)
}
// Deprecated: Please use GetReader(fileKey) instead.
func (s *System) GetFile(fileKey string) (*blob.Reader, error) {
	color.Yellow("Deprecated: Please replace GetFile with GetReader.")

	return s.GetReader(fileKey)
}
// GetReuploadableFile constructs a new reuploadable File value
// from the associated fileKey blob.Reader.
//
// If preserveName is false then the returned File.Name will have
// a new randomly generated suffix, otherwise it will reuse the original one.
//
// This method could be useful in case you want to clone an existing
// Record file and assign it to a new Record (e.g. in a Record duplicate action).
//
// If you simply want to copy an existing file to a new location you
// could check the Copy(srcKey, dstKey) method.
func (s *System) GetReuploadableFile(fileKey string, preserveName bool) (*File, error) {
	attrs, err := s.Attributes(fileKey)
	if err != nil {
		return nil, err
	}

	baseName := path.Base(fileKey)

	// fall back to the key's base name when there is no stored original name metadata
	originalName := attrs.Metadata[metadataOriginalName]
	if originalName == "" {
		originalName = baseName
	}

	file := &File{}
	file.Size = attrs.Size
	file.OriginalName = originalName
	// lazily open the underlying blob so the file can be (re)read on demand
	file.Reader = openFuncAsReader(func() (io.ReadSeekCloser, error) {
		return s.GetReader(fileKey)
	})

	if preserveName {
		file.Name = baseName
	} else {
		file.Name = normalizeName(file.Reader, originalName)
	}

	return file, nil
}
// Copy copies the file stored at srcKey to dstKey.
//
// If srcKey file doesn't exist, it returns ErrNotFound.
//
// If dstKey file already exists, it is overwritten.
func (s *System) Copy(srcKey, dstKey string) error {
	// note: the bucket API expects the destination key first
	return s.bucket.Copy(s.ctx, dstKey, srcKey)
}
// List returns a flat list with info for all files under the specified prefix.
func (s *System) List(prefix string) ([]*blob.ListObject, error) {
	result := []*blob.ListObject{}

	iter := s.bucket.List(&blob.ListOptions{Prefix: prefix})

	for {
		obj, err := iter.Next(s.ctx)
		if errors.Is(err, io.EOF) {
			break // normal end of iteration
		}
		if err != nil {
			return nil, err
		}
		result = append(result, obj)
	}

	return result, nil
}
// Upload writes content into the fileKey location.
func (s *System) Upload(content []byte, fileKey string) error {
	w, err := s.bucket.NewWriter(s.ctx, fileKey, &blob.WriterOptions{
		ContentType: mimetype.Detect(content).String(),
	})
	if err != nil {
		return err
	}

	if _, err = w.Write(content); err != nil {
		// surface the close error together with the write error
		return errors.Join(err, w.Close())
	}

	// check for close errors to ensure that the content was really saved
	return w.Close()
}
// UploadFile uploads the provided File to the fileKey location.
func (s *System) UploadFile(file *File, fileKey string) error {
	f, err := file.Reader.Open()
	if err != nil {
		return err
	}
	defer f.Close()

	mt, err := mimetype.DetectReader(f)
	if err != nil {
		return err
	}

	// rewind since DetectReader consumed part of the stream
	// (previously the seek error was silently ignored which could
	// have resulted in uploading truncated/shifted content)
	if _, err = f.Seek(0, io.SeekStart); err != nil {
		return err
	}

	originalName := file.OriginalName
	if len(originalName) > 255 {
		// keep only the first 255 chars as a very rudimentary measure
		// to prevent the metadata to grow too big in size
		originalName = originalName[:255]
	}

	opts := &blob.WriterOptions{
		ContentType: mt.String(),
		Metadata: map[string]string{
			metadataOriginalName: originalName,
		},
	}

	w, err := s.bucket.NewWriter(s.ctx, fileKey, opts)
	if err != nil {
		return err
	}

	if _, err = w.ReadFrom(f); err != nil {
		// also surface the close error instead of discarding it
		// (consistent with Upload)
		return errors.Join(err, w.Close())
	}

	// check for close errors to ensure that the file was really saved
	return w.Close()
}
// UploadMultipart uploads the provided multipart file to the fileKey location.
func (s *System) UploadMultipart(fh *multipart.FileHeader, fileKey string) error {
	f, err := fh.Open()
	if err != nil {
		return err
	}
	defer f.Close()

	mt, err := mimetype.DetectReader(f)
	if err != nil {
		return err
	}

	// rewind since DetectReader consumed part of the stream
	// (previously the seek error was silently ignored which could
	// have resulted in uploading truncated/shifted content)
	if _, err = f.Seek(0, io.SeekStart); err != nil {
		return err
	}

	originalName := fh.Filename
	if len(originalName) > 255 {
		// keep only the first 255 chars as a very rudimentary measure
		// to prevent the metadata to grow too big in size
		originalName = originalName[:255]
	}

	opts := &blob.WriterOptions{
		ContentType: mt.String(),
		Metadata: map[string]string{
			metadataOriginalName: originalName,
		},
	}

	w, err := s.bucket.NewWriter(s.ctx, fileKey, opts)
	if err != nil {
		return err
	}

	if _, err = w.ReadFrom(f); err != nil {
		// also surface the close error instead of discarding it
		// (consistent with Upload)
		return errors.Join(err, w.Close())
	}

	// check for close errors to ensure that the file was really saved
	return w.Close()
}
// Delete deletes stored file at fileKey location.
//
// If the file doesn't exist returns ErrNotFound.
func (s *System) Delete(fileKey string) error {
	return s.bucket.Delete(s.ctx, fileKey)
}
// DeletePrefix deletes everything starting with the specified prefix.
//
// The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_").
func (s *System) DeletePrefix(prefix string) []error {
	failed := []error{}

	// an empty prefix would otherwise match (and delete) every object in the bucket
	if prefix == "" {
		failed = append(failed, errors.New("prefix mustn't be empty"))
		return failed
	}

	// set of parent "directory" keys collected for best-effort cleanup later
	dirsMap := map[string]struct{}{}

	var isPrefixDir bool

	// treat the prefix as directory only if it ends with trailing slash
	if strings.HasSuffix(prefix, "/") {
		isPrefixDir = true
		dirsMap[strings.TrimRight(prefix, "/")] = struct{}{}
	}

	// delete all files with the prefix
	// ---
	iter := s.bucket.List(&blob.ListOptions{
		Prefix: prefix,
	})
	for {
		obj, err := iter.Next(s.ctx)
		if err != nil {
			// io.EOF marks the normal end of the iteration;
			// anything else is an actual listing failure
			if !errors.Is(err, io.EOF) {
				failed = append(failed, err)
			}
			break
		}

		if err := s.Delete(obj.Key); err != nil {
			failed = append(failed, err)
		} else if isPrefixDir {
			// remember the parent dir of every successfully deleted file
			slashIdx := strings.LastIndex(obj.Key, "/")
			if slashIdx > -1 {
				dirsMap[obj.Key[:slashIdx]] = struct{}{}
			}
		}
	}
	// ---

	// try to delete the empty remaining dir objects
	// (this operation usually is optional and there is no need to strictly check the result)
	// ---
	// fill dirs slice
	dirs := make([]string, 0, len(dirsMap))
	for d := range dirsMap {
		dirs = append(dirs, d)
	}

	// sort the child dirs first, aka. ["a/b/c", "a/b", "a"]
	sort.SliceStable(dirs, func(i, j int) bool {
		return len(strings.Split(dirs[i], "/")) > len(strings.Split(dirs[j], "/"))
	})

	// delete dirs
	for _, d := range dirs {
		if d != "" {
			// best-effort: the delete error is deliberately ignored here
			s.Delete(d)
		}
	}
	// ---

	return failed
}
// IsEmptyDir checks if the provided dir prefix doesn't have any files.
//
// A trailing slash will be appended to a non-empty dir string argument
// to ensure that the checked prefix is a "directory".
//
// Returns "false" in case the dir has at least one file, otherwise - "true".
func (s *System) IsEmptyDir(dir string) bool {
	if dir != "" && !strings.HasSuffix(dir, "/") {
		dir += "/"
	}

	iter := s.bucket.List(&blob.ListOptions{Prefix: dir})

	// the dir is empty only if the very first iteration reports io.EOF
	// (errors.Is(nil, io.EOF) is false, so a successful Next means "not empty")
	_, err := iter.Next(s.ctx)

	return errors.Is(err, io.EOF)
}
// inlineServeContentTypes lists the content types that Serve is allowed
// to send with "Content-Disposition: inline"; anything else is served
// as attachment (unless overridden by the download query parameter).
var inlineServeContentTypes = []string{
	// image
	"image/png", "image/jpg", "image/jpeg", "image/gif", "image/webp", "image/x-icon", "image/bmp",

	// video
	"video/webm", "video/mp4", "video/3gpp", "video/quicktime", "video/x-ms-wmv",

	// audio
	"audio/basic", "audio/aiff", "audio/mpeg", "audio/midi", "audio/mp3", "audio/wave",
	"audio/wav", "audio/x-wav", "audio/x-mpeg", "audio/x-m4a", "audio/aac",

	// document
	"application/pdf", "application/x-pdf",
}

// manualExtensionContentTypes is a map of file extensions to content types.
var manualExtensionContentTypes = map[string]string{
	// https://github.com/whatwg/mimesniff/issues/7
	".svg": "image/svg+xml",

	// https://github.com/gabriel-vasile/mimetype/pull/113
	".css": "text/css",

	// https://github.com/pocketbase/pocketbase/issues/6597
	".js":  "text/javascript",
	".mjs": "text/javascript",

	// https://github.com/pocketbase/pocketbase/discussions/7467
	".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
	".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
	".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
}

// forceAttachmentParam is the name of the request query parameter to
// force "Content-Disposition: attachment" header.
const forceAttachmentParam = "download"
// Serve serves the file at fileKey location to an HTTP response.
//
// If the `download` query parameter is used the file will be always served for
// download no matter of its type (aka. with "Content-Disposition: attachment").
//
// Internally this method uses [http.ServeContent] so Range requests,
// If-Match, If-Unmodified-Since, etc. headers are handled transparently.
func (s *System) Serve(res http.ResponseWriter, req *http.Request, fileKey string, name string) error {
	br, err := s.GetReader(fileKey)
	if err != nil {
		return err
	}
	defer br.Close()

	forceAttachment := false
	if raw := req.URL.Query().Get(forceAttachmentParam); raw != "" {
		forceAttachment, _ = strconv.ParseBool(raw)
	}

	realContentType := br.ContentType()

	disposition := "attachment"
	if !forceAttachment && list.ExistInSlice(realContentType, inlineServeContentTypes) {
		disposition = "inline"
	}

	// make an exception for specific content types and force a custom
	// content type to send in the response so that it can be loaded properly
	extContentType := realContentType
	if ct, ok := manualExtensionContentTypes[filepath.Ext(fileKey)]; ok {
		extContentType = ct
	}

	setHeaderIfMissing(res, "Content-Disposition", disposition+"; filename="+name)
	setHeaderIfMissing(res, "Content-Type", extContentType)
	setHeaderIfMissing(res, "Content-Security-Policy", "default-src 'none'; media-src 'self'; style-src 'unsafe-inline'; sandbox")

	// set a default cache-control header
	// (valid for 30 days but the cache is allowed to reuse the file for any requests
	// that are made in the last day while revalidating the res in the background)
	setHeaderIfMissing(res, "Cache-Control", "max-age=2592000, stale-while-revalidate=86400")

	http.ServeContent(res, req, name, br.ModTime(), br)

	return nil
}
// note: expects key to be in a canonical form (eg. "accept-encoding" should be "Accept-Encoding").
func setHeaderIfMissing(res http.ResponseWriter, key string, value string) {
if _, ok := res.Header()[key]; !ok {
res.Header().Set(key, value)
}
}
// ThumbSizeRegex matches the supported thumb size formats,
// e.g. "100x200", "100x200t", "100x200b" or "100x200f" (see CreateThumb).
var ThumbSizeRegex = regexp.MustCompile(`^(\d+)x(\d+)(t|b|f)?$`)
// CreateThumb creates a new thumb image for the file at originalKey location.
// The new thumb file is stored at thumbKey location.
//
// thumbSize is in the format:
//   - 0xH  (eg. 0x100)    - resize to H height preserving the aspect ratio
//   - Wx0  (eg. 300x0)    - resize to W width preserving the aspect ratio
//   - WxH  (eg. 300x100)  - resize and crop to WxH viewbox (from center)
//   - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top)
//   - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom)
//   - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping)
func (s *System) CreateThumb(originalKey string, thumbKey, thumbSize string) error {
	sizeParts := ThumbSizeRegex.FindStringSubmatch(thumbSize)
	if len(sizeParts) != 4 {
		return errors.New("thumb size must be in WxH, WxHt, WxHb or WxHf format")
	}

	width, _ := strconv.Atoi(sizeParts[1])
	height, _ := strconv.Atoi(sizeParts[2])
	resizeType := sizeParts[3]

	if width == 0 && height == 0 {
		return errors.New("thumb width and height cannot be zero at the same time")
	}

	// fetch the original
	r, readErr := s.GetReader(originalKey)
	if readErr != nil {
		return readErr
	}
	defer r.Close()

	// create imaging object from the original reader
	// (note: only the first frame for animated image formats)
	img, decodeErr := imaging.Decode(r, imaging.AutoOrientation(true))
	if decodeErr != nil {
		return decodeErr
	}

	var thumbImg *image.NRGBA

	if width == 0 || height == 0 {
		// force resize preserving aspect ratio
		thumbImg = imaging.Resize(img, width, height, imaging.Linear)
	} else {
		switch resizeType {
		case "f":
			// fit
			thumbImg = imaging.Fit(img, width, height, imaging.Linear)
		case "t":
			// fill and crop from top
			thumbImg = imaging.Fill(img, width, height, imaging.Top, imaging.Linear)
		case "b":
			// fill and crop from bottom
			thumbImg = imaging.Fill(img, width, height, imaging.Bottom, imaging.Linear)
		default:
			// fill and crop from center
			thumbImg = imaging.Fill(img, width, height, imaging.Center, imaging.Linear)
		}
	}

	originalContentType := r.ContentType()

	opts := &blob.WriterOptions{
		ContentType: originalContentType,
	}

	// pick the encode format matching the original content type
	var format imaging.Format
	switch originalContentType {
	case "image/jpeg":
		format = imaging.JPEG
	case "image/gif":
		format = imaging.GIF
	case "image/tiff":
		format = imaging.TIFF
	case "image/bmp":
		format = imaging.BMP
	default:
		// fallback to PNG (this includes webp!)
		opts.ContentType = "image/png"
		format = imaging.PNG
	}

	// open a thumb storage writer (aka. prepare for upload)
	w, err := s.bucket.NewWriter(s.ctx, thumbKey, opts)
	if err != nil {
		return err
	}

	// thumb encode (aka. upload)
	if err = imaging.Encode(w, thumbImg, format); err != nil {
		// also surface the close error instead of discarding it
		// (consistent with the other upload helpers)
		return errors.Join(err, w.Close())
	}

	// check for close errors to ensure that the thumb was really saved
	return w.Close()
}