Mirror of https://github.com/SecurityBrewery/catalyst.git (synced 2025-12-06 15:22:47 +01:00)

Compare commits: v0.15.0-rc ... v0.15.0-rc (9 commits)
| SHA1 |
|---|
| 9da90e7cc8 |
| d9f759c879 |
| 1e3f2f24dc |
| 3cb097126c |
| df96362c3c |
| 377d2dad5f |
| 87fc0e6567 |
| 06fdae4ab9 |
| 27129f24d5 |
.github/workflows/ci.yml (vendored, 14 changed lines)
@@ -11,7 +11,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - run: make install-golangci-lint generate-go
      - run: git diff --exit-code

@@ -30,7 +30,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - run: make install-golangci-lint fmt-go
      - run: git diff --exit-code

@@ -48,7 +48,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - run: make install-golangci-lint lint-go

  lint-ui:
@@ -73,7 +73,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - run: make test-coverage
      - uses: codecov/codecov-action@v4
        with:
@@ -86,7 +86,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - uses: oven-sh/setup-bun@v1
      - run: make install-ui test-ui

@@ -96,7 +96,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - uses: oven-sh/setup-bun@v1
      - run: make install-ui build-ui install-playwright test-playwright

@@ -122,7 +122,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - uses: oven-sh/setup-bun@v1
      - run: mkdir -p catalyst_data
      - run: cp testing/data/${{ matrix.folder }}/data.db catalyst_data/data.db
.github/workflows/goreleaser.yml (vendored, 19 changed lines)
@@ -18,7 +18,7 @@ jobs:
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v5
-       with: { go-version: '1.22' }
+       with: { go-version: '1.25' }
      - uses: oven-sh/setup-bun@v1

      - run: make install-ui build-ui

@@ -28,10 +28,13 @@ jobs:
          registry: ghcr.io
          username: "securitybrewery"
          password: ${{ secrets.GITHUB_TOKEN }}
-     - uses: goreleaser/goreleaser-action@v6
-       with:
-         distribution: goreleaser
-         version: '~> v2'
-         args: release --clean
-       env:
-         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+     - name: Run GoReleaser
+       run: |
+         docker run --rm --privileged \
+           -v `pwd`:/go/src/github.com/SecurityBrewery/catalyst \
+           -v /var/run/docker.sock:/var/run/docker.sock \
+           -w /go/src/github.com/SecurityBrewery/catalyst \
+           -e CGO_ENABLED=1 \
+           -e GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} \
+           ghcr.io/goreleaser/goreleaser-cross:latest \
+           release --clean
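Note for reviewers: the switch from goreleaser/goreleaser-action to the ghcr.io/goreleaser/goreleaser-cross image appears to track the CGO_ENABLED=1 builds introduced in the .goreleaser.yaml changes below, since that image ships the C/C++ cross-toolchains (o64-clang, aarch64-linux-gnu-gcc, mingw-w64, llvm-mingw) those builds reference, which a plain goreleaser install does not.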
@@ -8,6 +8,7 @@ linters:
    - dupl
    - err113
    - exhaustruct
    - funcorder
    - funlen
    - gochecknoglobals
    - godox
@@ -16,6 +17,7 @@ linters:
    - lll
    - maintidx
    - mnd
    - noinlineerr
    - nonamedreturns
    - perfsprint
    - prealloc
@@ -25,6 +27,7 @@ linters:
    - unparam
    - varnamelen
    - wrapcheck
    - wsl
  exclusions:
    generated: lax
    presets:
@@ -5,14 +5,84 @@ before:
    - go mod tidy

builds:
-  - env:
-      - CGO_ENABLED=0
+  - id: darwin-amd64
+    main: ./
+    binary: catalyst
+    goos:
+      - darwin
+    goarch:
+      - amd64
+    env:
+      - CGO_ENABLED=1
+      - CC=o64-clang
+      - CXX=o64-clang++
+    flags:
+      - -mod=readonly
+    ldflags:
+      - -s -w -X main.version={{.Version}}
+  - id: linux-arm64
+    main: ./
+    binary: catalyst
+    goos:
+      - linux
+      - darwin
+    goarch:
+      - arm64
+    env:
+      - CGO_ENABLED=1
+      - CC=aarch64-linux-gnu-gcc
+      - CXX=aarch64-linux-gnu-g++
+    flags:
+      - -mod=readonly
+    ldflags:
+      - -s -w -X main.version={{.Version}}
+  - id: linux-amd64
+    main: ./
+    binary: catalyst
+    goos:
+      - linux
+    goarch:
+      - amd64
+    env:
+      - CGO_ENABLED=1
+      - CC=x86_64-linux-gnu-gcc
+      - CXX=x86_64-linux-gnu-g++
+    flags:
+      - -mod=readonly
+    ldflags:
+      - -s -w -X main.version={{.Version}}
+  - id: windows-amd64
+    main: ./
+    binary: catalyst
+    goos:
+      - windows
+    goarch:
+      - amd64
+    env:
+      - CGO_ENABLED=1
+      - CC=x86_64-w64-mingw32-gcc
+      - CXX=x86_64-w64-mingw32-g++
+    flags:
+      - -mod=readonly
+    ldflags:
+      - -s -w -X main.version={{.Version}}
+  - id: windows-arm64
+    main: ./
+    binary: catalyst
+    goos:
+      - windows
+    goarch:
+      - arm64
+    env:
+      - CGO_ENABLED=1
+      - CC=/llvm-mingw/bin/aarch64-w64-mingw32-gcc
+      - CXX=/llvm-mingw/bin/aarch64-w64-mingw32-g++
+    flags:
+      - -mod=readonly
+    ldflags:
+      - -s -w -X main.version={{.Version}}

dockers:
-  - ids: [ catalyst ]
+  - ids: [ linux-amd64 ]
    dockerfile: docker/Dockerfile
    image_templates:
      - "ghcr.io/securitybrewery/catalyst:main"

@@ -22,7 +92,7 @@ dockers:
      - docker/entrypoint.sh

archives:
-  - format: tar.gz
+  - formats: tar.gz
    # this name template makes the OS and Arch compatible with the results of `uname`.
    name_template: >-
      {{ .ProjectName }}_

@@ -34,7 +104,7 @@ archives:
    # use zip for windows archives
    format_overrides:
      - goos: windows
-       format: zip
+       formats: zip

changelog:
  sort: asc
Makefile (2 changed lines)
@@ -4,7 +4,7 @@

.PHONY: install-golangci-lint
install-golangci-lint:
-	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v2.1.6
+	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v2.4.0

.PHONY: install-ui
install-ui:
@@ -432,6 +432,7 @@ func weeksAgo(c int) time.Time {

func dates(ticketCount int) (time.Time, time.Time) {
    const ticketsPerWeek = 10

    weeks := ticketCount / ticketsPerWeek

    created := gofakeit.DateRange(weeksAgo(1), weeksAgo(weeks+1)).UTC()
@@ -15,6 +15,7 @@ type DBTX interface {

type Queries struct {
    *ReadQueries
    *WriteQueries

    ReadDB  *sql.DB
    WriteDB *sql.DB
}
@@ -15,6 +15,7 @@ type DBTX interface {

type Queries struct {
    *ReadQueries
    *WriteQueries

    ReadDB  *sql.DB
    WriteDB *sql.DB
}
@@ -27,7 +27,7 @@ func TestSQLMigration_UpAndDown(t *testing.T) {
    require.NoError(t, m.up(t.Context(), queries, dir, uploader))

    // Table should exist
-   _, err = queries.WriteDB.Exec("INSERT INTO test_table (name) VALUES ('foo')")
+   _, err = queries.WriteDB.ExecContext(t.Context(), "INSERT INTO test_table (name) VALUES ('foo')")
    require.NoError(t, err)
}
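Side note: ExecContext binds the INSERT to the context returned by t.Context(), so the statement is cancelled when the test finishes or times out, whereas the plain Exec call it replaces ignores cancellation entirely.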
@@ -13,6 +13,7 @@ func TestVersionAndSetVersion(t *testing.T) {

    db, err := sql.Open("sqlite3", ":memory:")
    require.NoError(t, err, "failed to open in-memory db")

    defer db.Close()

    ver, err := version(t.Context(), db)
app/rootstore/rootstore.go (new file, 290 lines)
@@ -0,0 +1,290 @@
package rootstore

import (
    "context"
    "crypto/rand"
    "encoding/json"
    "errors"
    "io"
    "io/fs"
    "net/http"
    "os"
    "path/filepath"

    "github.com/tus/tusd/v2/pkg/handler"
)

var (
    defaultFilePerm      = os.FileMode(0o664)
    defaultDirectoryPerm = os.FileMode(0o754)
)

const (
    // StorageKeyPath is the key of the path of uploaded file in handler.FileInfo.Storage.
    StorageKeyPath = "Path"
    // StorageKeyInfoPath is the key of the path of .info file in handler.FileInfo.Storage.
    StorageKeyInfoPath = "InfoPath"
)

// RootStore is a file system based data store for tusd.
type RootStore struct {
    root *os.Root
}

func New(root *os.Root) RootStore {
    return RootStore{root: root}
}

// UseIn sets this store as the core data store in the passed composer and adds
// all possible extension to it.
func (store RootStore) UseIn(composer *handler.StoreComposer) {
    composer.UseCore(store)
    composer.UseTerminater(store)
    composer.UseConcater(store)
    composer.UseLengthDeferrer(store)
    composer.UseContentServer(store)
}

func (store RootStore) NewUpload(_ context.Context, info handler.FileInfo) (handler.Upload, error) {
    if info.ID == "" {
        info.ID = rand.Text()
    }

    // The .info file's location can directly be deduced from the upload ID
    infoPath := store.infoPath(info.ID)
    // The binary file's location might be modified by the pre-create hook.
    var binPath string
    if info.Storage != nil && info.Storage[StorageKeyPath] != "" {
        binPath = info.Storage[StorageKeyPath]
    } else {
        binPath = store.defaultBinPath(info.ID)
    }

    info.Storage = map[string]string{
        "Type":             "rootstore",
        StorageKeyPath:     binPath,
        StorageKeyInfoPath: infoPath,
    }

    _ = store.root.MkdirAll(filepath.Dir(binPath), defaultDirectoryPerm)

    // Create binary file with no content
    if err := store.root.WriteFile(binPath, nil, defaultFilePerm); err != nil {
        return nil, err
    }

    upload := &fileUpload{
        root:     store.root,
        info:     info,
        infoPath: infoPath,
        binPath:  binPath,
    }

    // writeInfo creates the file by itself if necessary
    if err := upload.writeInfo(); err != nil {
        return nil, err
    }

    return upload, nil
}

func (store RootStore) GetUpload(_ context.Context, id string) (handler.Upload, error) {
    infoPath := store.infoPath(id)

    data, err := fs.ReadFile(store.root.FS(), filepath.ToSlash(infoPath))
    if err != nil {
        if os.IsNotExist(err) {
            // Interpret os.ErrNotExist as 404 Not Found
            err = handler.ErrNotFound
        }

        return nil, err
    }

    var info handler.FileInfo
    if err := json.Unmarshal(data, &info); err != nil {
        return nil, err
    }

    // If the info file contains a custom path to the binary file, we use that. If not, we
    // fall back to the default value (although the Path property should always be set in recent
    // tusd versions).
    var binPath string
    if info.Storage != nil && info.Storage[StorageKeyPath] != "" {
        // No filepath.Join here because the joining already happened in NewUpload. Duplicate joining
        // with relative paths lead to incorrect paths
        binPath = info.Storage[StorageKeyPath]
    } else {
        binPath = store.defaultBinPath(info.ID)
    }

    stat, err := store.root.Stat(binPath)
    if err != nil {
        if os.IsNotExist(err) {
            // Interpret os.ErrNotExist as 404 Not Found
            err = handler.ErrNotFound
        }

        return nil, err
    }

    info.Offset = stat.Size()

    return &fileUpload{
        root:     store.root,
        info:     info,
        binPath:  binPath,
        infoPath: infoPath,
    }, nil
}

func (store RootStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
    return upload.(*fileUpload) //nolint:forcetypeassert
}

func (store RootStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
    return upload.(*fileUpload) //nolint:forcetypeassert
}

func (store RootStore) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
    return upload.(*fileUpload) //nolint:forcetypeassert
}

func (store RootStore) AsServableUpload(upload handler.Upload) handler.ServableUpload {
    return upload.(*fileUpload) //nolint:forcetypeassert
}

// defaultBinPath returns the path to the file storing the binary data, if it is
// not customized using the pre-create hook.
func (store RootStore) defaultBinPath(id string) string {
    return id
}

// infoPath returns the path to the .info file storing the file's info.
func (store RootStore) infoPath(id string) string {
    return id + ".info"
}

type fileUpload struct {
    root *os.Root

    // info stores the current information about the upload
    info handler.FileInfo
    // infoPath is the path to the .info file
    infoPath string
    // binPath is the path to the binary file (which has no extension)
    binPath string
}

func (upload *fileUpload) GetInfo(_ context.Context) (handler.FileInfo, error) {
    return upload.info, nil
}

func (upload *fileUpload) WriteChunk(_ context.Context, _ int64, src io.Reader) (int64, error) {
    file, err := upload.root.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
    if err != nil {
        return 0, err
    }
    // Avoid the use of defer file.Close() here to ensure no errors are lost
    // See https://github.com/tus/tusd/issues/698.

    n, err := io.Copy(file, src)
    upload.info.Offset += n

    if err != nil {
        file.Close()

        return n, err
    }

    return n, file.Close()
}

func (upload *fileUpload) GetReader(_ context.Context) (io.ReadCloser, error) {
    return upload.root.Open(upload.binPath)
}

func (upload *fileUpload) Terminate(_ context.Context) error {
    // We ignore errors indicating that the files cannot be found because we want
    // to delete them anyways. The files might be removed by a cron job for cleaning up
    // or some file might have been removed when tusd crashed during the termination.
    err := upload.root.Remove(upload.binPath)
    if err != nil && !errors.Is(err, os.ErrNotExist) {
        return err
    }

    err = upload.root.Remove(upload.infoPath)
    if err != nil && !errors.Is(err, os.ErrNotExist) {
        return err
    }

    return nil
}

func (upload *fileUpload) ConcatUploads(_ context.Context, uploads []handler.Upload) (err error) {
    file, err := upload.root.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
    if err != nil {
        return err
    }

    defer func() {
        // Ensure that close error is propagated, if it occurs.
        // See https://github.com/tus/tusd/issues/698.
        cerr := file.Close()
        if err == nil {
            err = cerr
        }
    }()

    for _, partialUpload := range uploads {
        if err := partialUpload.(*fileUpload).appendTo(file); err != nil { //nolint:forcetypeassert
            return err
        }
    }

    return
}

func (upload *fileUpload) appendTo(file *os.File) error {
    src, err := upload.root.Open(upload.binPath)
    if err != nil {
        return err
    }

    if _, err := io.Copy(file, src); err != nil {
        src.Close()

        return err
    }

    return src.Close()
}

func (upload *fileUpload) DeclareLength(_ context.Context, length int64) error {
    upload.info.Size = length
    upload.info.SizeIsDeferred = false

    return upload.writeInfo()
}

// writeInfo updates the entire information. Everything will be overwritten.
func (upload *fileUpload) writeInfo() error {
    data, err := json.Marshal(upload.info)
    if err != nil {
        return err
    }

    _ = upload.root.MkdirAll(filepath.Dir(upload.infoPath), defaultDirectoryPerm)

    return upload.root.WriteFile(upload.infoPath, data, defaultFilePerm)
}

func (upload *fileUpload) FinishUpload(_ context.Context) error {
    return nil
}

func (upload *fileUpload) ServeContent(_ context.Context, w http.ResponseWriter, r *http.Request) error {
    http.ServeFileFS(w, r, upload.root.FS(), filepath.ToSlash(upload.binPath))

    return nil
}
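For reviewers unfamiliar with tusd data stores, the following sketch shows how the new RootStore could be wired into a tusd handler via the UseIn method defined above. The upload directory, base path, and listen address are illustrative assumptions, not values taken from this changeset.

```go
package main

import (
	"log"
	"net/http"
	"os"

	tusd "github.com/tus/tusd/v2/pkg/handler"

	"github.com/SecurityBrewery/catalyst/app/rootstore"
)

func main() {
	// Assumption: uploads live in a local directory. os.OpenRoot confines every
	// file operation of the store to this directory tree.
	root, err := os.OpenRoot("./catalyst_data/uploads")
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()

	// Register the store and every extension it implements (terminate, concat,
	// deferred length, content serving) on a tusd store composer.
	composer := tusd.NewStoreComposer()
	rootstore.New(root).UseIn(composer)

	uploadHandler, err := tusd.NewHandler(tusd.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
	})
	if err != nil {
		log.Fatal(err)
	}

	http.Handle("/files/", http.StripPrefix("/files/", uploadHandler))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

In Catalyst itself the wiring goes through the app's upload package (see the import change further down); this standalone program only illustrates the store's API surface.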
app/rootstore/rootstore_test.go (new file, 391 lines)
@@ -0,0 +1,391 @@
package rootstore

import (
    "io"
    "net/http"
    "net/http/httptest"
    "os"
    "path/filepath"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/tus/tusd/v2/pkg/handler"
)

// Test interface implementation of FSStore.
var (
    _ handler.DataStore               = RootStore{}
    _ handler.TerminaterDataStore     = RootStore{}
    _ handler.ConcaterDataStore       = RootStore{}
    _ handler.LengthDeferrerDataStore = RootStore{}
)

func TestFSStore(t *testing.T) {
    t.Parallel()

    root, err := os.OpenRoot(t.TempDir())
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)
    ctx := t.Context()

    // Create new upload
    upload, err := store.NewUpload(ctx, handler.FileInfo{
        Size: 42,
        MetaData: map[string]string{
            "hello": "world",
        },
    })
    require.NoError(t, err)
    assert.NotNil(t, upload)

    // Check info without writing
    info, err := upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 42, info.Size)
    assert.EqualValues(t, 0, info.Offset)
    assert.Equal(t, handler.MetaData{"hello": "world"}, info.MetaData)
    assert.Len(t, info.Storage, 3)
    assert.Equal(t, "rootstore", info.Storage["Type"])
    assert.Equal(t, info.ID, info.Storage["Path"])
    assert.Equal(t, info.ID+".info", info.Storage["InfoPath"])

    // Write data to upload
    bytesWritten, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world"))
    require.NoError(t, err)
    assert.EqualValues(t, len("hello world"), bytesWritten)

    // Check new offset
    info, err = upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 42, info.Size)
    assert.EqualValues(t, 11, info.Offset)

    // Read content
    reader, err := upload.GetReader(ctx)
    require.NoError(t, err)

    content, err := io.ReadAll(reader)
    require.NoError(t, err)
    assert.Equal(t, "hello world", string(content))
    reader.Close()

    // Serve content
    w := httptest.NewRecorder()
    r := httptest.NewRequest(http.MethodGet, "/", nil)
    r.Header.Set("Range", "bytes=0-4")

    err = store.AsServableUpload(upload).ServeContent(t.Context(), w, r)
    require.NoError(t, err)

    assert.Equal(t, http.StatusPartialContent, w.Code)
    assert.Equal(t, "5", w.Header().Get("Content-Length"))
    assert.Equal(t, "text/plain; charset=utf-8", w.Header().Get("Content-Type"))
    assert.Equal(t, "bytes 0-4/11", w.Header().Get("Content-Range"))
    assert.NotEmpty(t, w.Header().Get("Last-Modified"))
    assert.Equal(t, "hello", w.Body.String())

    // Terminate upload
    require.NoError(t, store.AsTerminatableUpload(upload).Terminate(ctx))

    // Test if upload is deleted
    upload, err = store.GetUpload(ctx, info.ID)
    assert.Nil(t, upload)
    assert.Equal(t, handler.ErrNotFound, err)
}

// TestCreateDirectories tests whether an upload with a slash in its ID causes
// the correct directories to be created.
func TestFSStoreCreateDirectories(t *testing.T) {
    t.Parallel()

    tmp := t.TempDir()

    root, err := os.OpenRoot(tmp)
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)
    ctx := t.Context()

    // Create new upload
    upload, err := store.NewUpload(ctx, handler.FileInfo{
        ID:   "hello/world/123",
        Size: 42,
        MetaData: map[string]string{
            "hello": "world",
        },
    })
    require.NoError(t, err)
    assert.NotNil(t, upload)

    // Check info without writing
    info, err := upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 42, info.Size)
    assert.EqualValues(t, 0, info.Offset)
    assert.Equal(t, handler.MetaData{"hello": "world"}, info.MetaData)
    assert.Len(t, info.Storage, 3)
    assert.Equal(t, "rootstore", info.Storage["Type"])
    assert.Equal(t, filepath.FromSlash(info.ID), info.Storage["Path"])
    assert.Equal(t, filepath.FromSlash(info.ID+".info"), info.Storage["InfoPath"])

    // Write data to upload
    bytesWritten, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world"))
    require.NoError(t, err)
    assert.EqualValues(t, len("hello world"), bytesWritten)

    // Check new offset
    info, err = upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 42, info.Size)
    assert.EqualValues(t, 11, info.Offset)

    // Read content
    reader, err := upload.GetReader(ctx)
    require.NoError(t, err)

    content, err := io.ReadAll(reader)
    require.NoError(t, err)
    assert.Equal(t, "hello world", string(content))
    reader.Close()

    // Check that the file and directory exists on disk
    statInfo, err := os.Stat(filepath.Join(tmp, "hello/world/123"))
    require.NoError(t, err)
    assert.True(t, statInfo.Mode().IsRegular())
    assert.EqualValues(t, 11, statInfo.Size())
    statInfo, err = os.Stat(filepath.Join(tmp, "hello/world/"))
    require.NoError(t, err)
    assert.True(t, statInfo.Mode().IsDir())

    // Terminate upload
    require.NoError(t, store.AsTerminatableUpload(upload).Terminate(ctx))

    // Test if upload is deleted
    upload, err = store.GetUpload(ctx, info.ID)
    assert.Nil(t, upload)
    assert.Equal(t, handler.ErrNotFound, err)
}

func TestFSStoreNotFound(t *testing.T) {
    t.Parallel()

    root, err := os.OpenRoot(t.TempDir())
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)
    ctx := t.Context()

    upload, err := store.GetUpload(ctx, "upload-that-does-not-exist")
    require.Error(t, err)
    assert.Equal(t, handler.ErrNotFound, err)
    assert.Nil(t, upload)
}

func TestFSStoreConcatUploads(t *testing.T) {
    t.Parallel()

    tmp := t.TempDir()

    root, err := os.OpenRoot(tmp)
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)
    ctx := t.Context()

    // Create new upload to hold concatenated upload
    finUpload, err := store.NewUpload(ctx, handler.FileInfo{Size: 9})
    require.NoError(t, err)
    assert.NotNil(t, finUpload)

    finInfo, err := finUpload.GetInfo(ctx)
    require.NoError(t, err)

    finID := finInfo.ID

    // Create three uploads for concatenating
    partialUploads := make([]handler.Upload, 3)
    contents := []string{
        "abc",
        "def",
        "ghi",
    }

    for i := range 3 {
        upload, err := store.NewUpload(ctx, handler.FileInfo{Size: 3})
        require.NoError(t, err)

        n, err := upload.WriteChunk(ctx, 0, strings.NewReader(contents[i]))
        require.NoError(t, err)
        assert.EqualValues(t, 3, n)

        partialUploads[i] = upload
    }

    err = store.AsConcatableUpload(finUpload).ConcatUploads(ctx, partialUploads)
    require.NoError(t, err)

    // Check offset
    finUpload, err = store.GetUpload(ctx, finID)
    require.NoError(t, err)

    info, err := finUpload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 9, info.Size)
    assert.EqualValues(t, 9, info.Offset)

    // Read content
    reader, err := finUpload.GetReader(ctx)
    require.NoError(t, err)

    content, err := io.ReadAll(reader)
    require.NoError(t, err)
    assert.Equal(t, "abcdefghi", string(content))
    reader.Close()
}

func TestFSStoreDeclareLength(t *testing.T) {
    t.Parallel()

    tmp := t.TempDir()

    root, err := os.OpenRoot(tmp)
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)
    ctx := t.Context()

    upload, err := store.NewUpload(ctx, handler.FileInfo{
        Size:           0,
        SizeIsDeferred: true,
    })
    require.NoError(t, err)
    assert.NotNil(t, upload)

    info, err := upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 0, info.Size)
    assert.True(t, info.SizeIsDeferred)

    err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, 100)
    require.NoError(t, err)

    updatedInfo, err := upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 100, updatedInfo.Size)
    assert.False(t, updatedInfo.SizeIsDeferred)
}

// TestCustomRelativePath tests whether the upload's destination can be customized
// relative to the storage directory.
func TestFSStoreCustomRelativePath(t *testing.T) {
    t.Parallel()

    tmp := t.TempDir()

    root, err := os.OpenRoot(tmp)
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)
    ctx := t.Context()

    // Create new upload
    upload, err := store.NewUpload(ctx, handler.FileInfo{
        ID:   "folder1/info",
        Size: 42,
        Storage: map[string]string{
            "Path": "./folder2/bin",
        },
    })
    require.NoError(t, err)
    assert.NotNil(t, upload)

    // Check info without writing
    info, err := upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 42, info.Size)
    assert.EqualValues(t, 0, info.Offset)
    assert.Len(t, info.Storage, 3)
    assert.Equal(t, "rootstore", info.Storage["Type"])
    assert.Equal(t, filepath.FromSlash("./folder2/bin"), info.Storage["Path"])
    assert.Equal(t, filepath.FromSlash("folder1/info.info"), info.Storage["InfoPath"])

    // Write data to upload
    bytesWritten, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world"))
    require.NoError(t, err)
    assert.EqualValues(t, len("hello world"), bytesWritten)

    // Check new offset
    info, err = upload.GetInfo(ctx)
    require.NoError(t, err)
    assert.EqualValues(t, 42, info.Size)
    assert.EqualValues(t, 11, info.Offset)

    // Read content
    reader, err := upload.GetReader(ctx)
    require.NoError(t, err)

    content, err := io.ReadAll(reader)
    require.NoError(t, err)
    assert.Equal(t, "hello world", string(content))
    reader.Close()

    // Check that the output file and info file exist on disk
    statInfo, err := os.Stat(filepath.Join(tmp, "folder2/bin"))
    require.NoError(t, err)
    assert.True(t, statInfo.Mode().IsRegular())
    assert.EqualValues(t, 11, statInfo.Size())
    statInfo, err = os.Stat(filepath.Join(tmp, "folder1/info.info"))
    require.NoError(t, err)
    assert.True(t, statInfo.Mode().IsRegular())

    // Terminate upload
    require.NoError(t, store.AsTerminatableUpload(upload).Terminate(ctx))

    // Test if upload is deleted
    upload, err = store.GetUpload(ctx, info.ID)
    assert.Nil(t, upload)
    assert.Equal(t, handler.ErrNotFound, err)
}

// TestCustomAbsolutePath tests whether the upload's destination can be customized
// using an absolute path to the storage directory.
func TestFSStoreCustomAbsolutePath(t *testing.T) {
    t.Parallel()

    root, err := os.OpenRoot(t.TempDir())
    require.NoError(t, err)

    t.Cleanup(func() { root.Close() })

    store := New(root)

    // Create new upload, but the Path property points to a directory
    // outside of the directory given to FSStore
    binPath := filepath.Join(t.TempDir(), "dir/my-upload.bin")
    _, err = store.NewUpload(t.Context(), handler.FileInfo{
        ID:   "my-upload",
        Size: 42,
        Storage: map[string]string{
            "Path": binPath,
        },
    })
    require.Error(t, err)

    _, err = os.Stat(binPath)
    require.Error(t, err)
}
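The Storage["Path"] handling exercised by the relative- and absolute-path tests above corresponds to tusd's pre-create hook. As a hedged illustration (the package name and the "ticket"/"filename" metadata keys are hypothetical, not defined by this changeset), such a callback could route each upload's binary file into a per-ticket subdirectory:

```go
// Package hooks sketches a tusd pre-create callback; it is illustrative only.
package hooks

import (
	tusd "github.com/tus/tusd/v2/pkg/handler"
)

// PreCreate redirects the binary file into a per-ticket subdirectory by
// setting the "Path" storage key, which RootStore.NewUpload honors. A real
// callback would also make the file name unique per upload.
func PreCreate(hook tusd.HookEvent) (tusd.HTTPResponse, tusd.FileInfoChanges, error) {
	return tusd.HTTPResponse{}, tusd.FileInfoChanges{
		Storage: map[string]string{
			"Path": "tickets/" + hook.Upload.MetaData["ticket"] + "/" + hook.Upload.MetaData["filename"],
		},
	}, nil
}
```

The callback would be registered on the handler configuration, e.g. tusd.Config{PreUploadCreateCallback: hooks.PreCreate}.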
@@ -11,11 +11,11 @@ import (
    "github.com/go-chi/chi/v5"
    "github.com/tus/tusd/v2/pkg/filelocker"
    tusd "github.com/tus/tusd/v2/pkg/handler"
-   "github.com/tus/tusd/v2/pkg/rootstore"

    "github.com/SecurityBrewery/catalyst/app/auth"
    "github.com/SecurityBrewery/catalyst/app/database"
    "github.com/SecurityBrewery/catalyst/app/database/sqlc"
+   "github.com/SecurityBrewery/catalyst/app/rootstore"
    "github.com/SecurityBrewery/catalyst/app/upload"
)
@@ -16,6 +16,7 @@ func Test_marshal(t *testing.T) {
    out := marshal(data)

    var res map[string]any

    err := json.Unmarshal([]byte(out), &res)
    require.NoError(t, err, "invalid json")
go.mod (6 changed lines)
@@ -1,17 +1,15 @@
module github.com/SecurityBrewery/catalyst

-go 1.24
+go 1.25

tool (
    github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen
    github.com/sqlc-dev/sqlc/cmd/sqlc
)

-replace github.com/tus/tusd/v2 v2.8.0 => github.com/SecurityBrewery/tusd/v2 v2.0.0-20250628083448-4def5f97f3a6

require (
    github.com/brianvoe/gofakeit/v7 v7.2.1
-   github.com/go-chi/chi/v5 v5.2.1
+   github.com/go-chi/chi/v5 v5.2.2
    github.com/go-co-op/gocron/v2 v2.16.2
    github.com/golang-jwt/jwt/v5 v5.2.2
    github.com/google/martian/v3 v3.3.3
go.sum (28 changed lines)
@@ -6,8 +6,6 @@ github.com/Acconut/go-httptest-recorder v1.0.0 h1:TAv2dfnqp/l+SUvIaMAUK4GeN4+wqb
github.com/Acconut/go-httptest-recorder v1.0.0/go.mod h1:CwQyhTH1kq/gLyWiRieo7c0uokpu3PXeyF/nZjUNtmM=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
-github.com/SecurityBrewery/tusd/v2 v2.0.0-20250628083448-4def5f97f3a6 h1:RVwfrJlnyEOigrDU95mJI/DyoaWWUewE8S4bT8PARlg=
-github.com/SecurityBrewery/tusd/v2 v2.0.0-20250628083448-4def5f97f3a6/go.mod h1:ZfOwo1YI2XpbsvMDLNmLDedopkC7QVebdvywnSNiluA=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=

@@ -34,8 +32,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/getkin/kin-openapi v0.132.0 h1:3ISeLMsQzcb5v26yeJrBcdTCEQTag36ZjaGk7MIRUwk=
github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58=
-github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
-github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
+github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-co-op/gocron/v2 v2.16.2 h1:r08P663ikXiulLT9XaabkLypL/W9MoCIbqgQoAutyX4=
github.com/go-co-op/gocron/v2 v2.16.2/go.mod h1:4YTLGCCAH75A5RlQ6q+h+VacO7CgjkgP0EJ+BEOXRSI=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=

@@ -188,6 +186,8 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/tus/lockfile v1.2.0 h1:92dMoNyeb5zaNi8eQ79WLqt/npUWUFkaM5ZM9kOMIDM=
github.com/tus/lockfile v1.2.0/go.mod h1:JyfWCHNyfd7eGxudGohrkt38kuKRki6L0JH82p2e+mc=
+github.com/tus/tusd/v2 v2.8.0 h1:X2jGxQ05jAW4inDd2ogmOKqwnb4c/D0lw2yhgHayWyU=
+github.com/tus/tusd/v2 v2.8.0/go.mod h1:3/zEOVQQIwmJhvNam8phV4x/UQt68ZmZiTzeuJUNhVo=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v3 v3.3.8 h1:BzolUExliMdet9NlJ/u4m5vHSotJ3PzEqSAZ1oPMa/E=

@@ -203,16 +203,16 @@ github.com/wneessen/go-mail v0.6.2/go.mod h1:L/PYjPK3/2ZlNb2/FjEBIn9n1rUWjW+Toy5
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
-go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
-go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
-go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
-go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
-go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
-go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
-go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
-go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
-go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
+go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
+go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
+go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
+go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
+go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
+go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
+go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
+go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
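Taken together, the go.mod and go.sum changes retire the temporary fork: with the in-repo app/rootstore package above, the replace directive pointing github.com/tus/tusd/v2 at github.com/SecurityBrewery/tusd/v2 is dropped, the fork's checksums disappear, checksums for upstream tusd v2.8.0 are added, and the OpenTelemetry modules move back to v1.35.0, presumably the versions upstream tusd v2.8.0 requires.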
main.go (11 changed lines)
@@ -40,8 +40,11 @@ func main() {
            },
        },
        {
-           Name:  "serve",
-           Usage: "Start the Catalyst server",
+           Name:  "serve",
+           Usage: "Start the Catalyst server",
+           Flags: []cli.Flag{
+               &cli.StringFlag{Name: "http", Usage: "HTTP listen address", Value: ":8090"},
+           },
            Action: serve,
        },
        {
@@ -108,8 +111,10 @@ func serve(ctx context.Context, command *cli.Command) error {

    defer cleanup()

+   addr := command.String("http")
+
    server := &http.Server{
-       Addr:        ":8090",
+       Addr:        addr,
        Handler:     catalyst,
        ReadTimeout: 10 * time.Minute,
    }
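With this change the listen address becomes configurable at runtime, e.g. `catalyst serve --http :9000`, while the flag's default of :8090 preserves the previously hard-coded behaviour.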
@@ -5,7 +5,7 @@ export function useAPI() {
  const authStore = useAuthStore()
  return new DefaultApi(
    new Configuration({
-     basePath: 'http://localhost:8090/api',
+     basePath: '/api',
      headers: {
        'Content-Type': 'application/json',
        Accept: 'application/json',
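Using the relative basePath '/api' makes the generated client call whatever origin serves the UI, so the frontend no longer hard-codes the local development address and should work unchanged when served by the Catalyst binary itself or behind a proxy.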