models

package
v0.0.3
Published: Mar 11, 2024 License: MIT Imports: 13 Imported by: 0

Documentation

Overview

Package source files: context.go, def.go, hooks.go, metrics.go (DynamicBucketTusd/pkg/models).

Index

Constants

const CurrentUploadDraftInteropVersion = "4"
const UploadLengthDeferred = "1"

Variables

var (
	ReExtractFileID  = regexp.MustCompile(`([^/]+)\/?$`)
	ReForwardedHost  = regexp.MustCompile(`host="?([^;"]+)`)
	ReForwardedProto = regexp.MustCompile(`proto=(https?)`)
	ReMimeType       = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
)
var (
	ErrUnsupportedVersion               = NewError("ERR_UNSUPPORTED_VERSION", "missing, invalid or unsupported Tus-Resumable header", http.StatusPreconditionFailed)
	ErrMaxSizeExceeded                  = NewError("ERR_MAX_SIZE_EXCEEDED", "maximum size exceeded", http.StatusRequestEntityTooLarge)
	ErrInvalidContentType               = NewError("ERR_INVALID_CONTENT_TYPE", "missing or invalid Content-Type header", http.StatusBadRequest)
	ErrInvalidUploadLength              = NewError("ERR_INVALID_UPLOAD_LENGTH", "missing or invalid Upload-Length header", http.StatusBadRequest)
	ErrInvalidOffset                    = NewError("ERR_INVALID_OFFSET", "missing or invalid Upload-Offset header", http.StatusBadRequest)
	ErrNotFound                         = NewError("ERR_UPLOAD_NOT_FOUND", "upload not found", http.StatusNotFound)
	ErrFileLocked                       = NewError("ERR_UPLOAD_LOCKED", "file currently locked", http.StatusLocked)
	ErrLockTimeout                      = NewError("ERR_LOCK_TIMEOUT", "failed to acquire lock before timeout", http.StatusInternalServerError)
	ErrMismatchOffset                   = NewError("ERR_MISMATCHED_OFFSET", "mismatched offset", http.StatusConflict)
	ErrSizeExceeded                     = NewError("ERR_UPLOAD_SIZE_EXCEEDED", "upload's size exceeded", http.StatusRequestEntityTooLarge)
	ErrNotImplemented                   = NewError("ERR_NOT_IMPLEMENTED", "feature not implemented", http.StatusNotImplemented)
	ErrUploadNotFinished                = NewError("ERR_UPLOAD_NOT_FINISHED", "one of the partial uploads is not finished", http.StatusBadRequest)
	ErrInvalidConcat                    = NewError("ERR_INVALID_CONCAT", "invalid Upload-Concat header", http.StatusBadRequest)
	ErrModifyFinal                      = NewError("ERR_MODIFY_FINAL", "modifying a final upload is not allowed", http.StatusForbidden)
	ErrUploadLengthAndUploadDeferLength = NewError("ERR_AMBIGUOUS_UPLOAD_LENGTH", "provided both Upload-Length and Upload-Defer-Length", http.StatusBadRequest)
	ErrInvalidUploadDeferLength         = NewError("ERR_INVALID_UPLOAD_LENGTH_DEFER", "invalid Upload-Defer-Length header", http.StatusBadRequest)
	ErrUploadStoppedByServer            = NewError("ERR_UPLOAD_STOPPED", "upload has been stopped by server", http.StatusBadRequest)
	ErrUploadRejectedByServer           = NewError("ERR_UPLOAD_REJECTED", "upload creation has been rejected by server", http.StatusBadRequest)
	ErrUploadInterrupted                = NewError("ERR_UPLOAD_INTERRUPTED", "upload has been interrupted by another request for this upload resource", http.StatusBadRequest)
	ErrServerShutdown                   = NewError("ERR_SERVER_SHUTDOWN", "request has been interrupted because the server is shutting down", http.StatusServiceUnavailable)
	ErrOriginNotAllowed                 = NewError("ERR_ORIGIN_NOT_ALLOWED", "request origin is not allowed", http.StatusForbidden)

	// These two responses are 500 for backwards compatibility. Clients might receive a timeout response
	// when the upload got interrupted. Most clients will not retry 4XX but only 5XX, so we respond with 500 here.
	ErrReadTimeout     = NewError("ERR_READ_TIMEOUT", "timeout while reading request body", http.StatusInternalServerError)
	ErrConnectionReset = NewError("ERR_CONNECTION_RESET", "TCP connection reset by peer", http.StatusInternalServerError)
)

Functions

func NewDelayedContext

func NewDelayedContext(parent context.Context, delay time.Duration) context.Context

NewDelayedContext returns a context that is cancelled with a delay. If the parent context is done, the new context will also be cancelled, but only after waiting the specified delay. Note: the parent context MUST be cancelled, otherwise this will leak resources. In the case of http.Request.Context, the net/http package ensures that the context is always cancelled.
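
A minimal usage sketch, assuming this is called from an HTTP handler and that the net/http and time packages are imported; the three-second grace period is an arbitrary example value:

func handlePatch(w http.ResponseWriter, r *http.Request) {
	// Derive a context that is cancelled three seconds after the request
	// context is done, giving the data store time to flush its last writes.
	ctx := NewDelayedContext(r.Context(), 3*time.Second)

	// Pass ctx (not r.Context()) to store operations that should be allowed
	// to finish briefly after the client disconnects.
	_ = ctx
}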

Types

type BodyReader

type BodyReader struct {
	// contains filtered or unexported fields
}

BodyReader is an io.Reader intended to wrap the request body reader. If an error occurs while reading the request body, it does not return this error to the reading entity; instead it stores the error and closes the io.Reader, so that the error can be checked afterwards. This is helpful because the stores do not have to handle the error themselves; it can instead be handled in the handler. In addition, the BodyReader keeps track of how many bytes were read.

func NewBodyReader

func NewBodyReader(c *HttpContext, maxSize int64) *BodyReader

func (*BodyReader) BytesRead

func (r *BodyReader) BytesRead() int64

func (*BodyReader) CloseWithError

func (r *BodyReader) CloseWithError(err error)

func (BodyReader) HasError

func (r BodyReader) HasError() error

func (*BodyReader) Read

func (r *BodyReader) Read(b []byte) (int, error)

func (*BodyReader) SetOnReadDone

func (r *BodyReader) SetOnReadDone(f func())
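
A hedged sketch of how a store-side copy loop might combine these methods; the function name and destination writer are illustrative, not part of this package, and the io package is assumed to be imported:

func copyBody(r *BodyReader, dst io.Writer) (int64, error) {
	n, copyErr := io.Copy(dst, r)

	// BodyReader stores read errors instead of returning them to the caller
	// (see the type documentation above), so the real cause must be checked
	// separately after the copy finishes.
	if err := r.HasError(); err != nil {
		return r.BytesRead(), err
	}
	return n, copyErr
}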

type ConcatableUpload

type ConcatableUpload interface {
	// ConcatUploads concatenates the content from the provided partial uploads
	// and writes the result in the destination upload.
	// The caller (usually the handler) must and will ensure that this
	// destination upload has been created before with enough space to hold all
	// partial uploads. The order, in which the partial uploads are supplied,
	// must be respected during concatenation.
	ConcatUploads(ctx context.Context, partialUploads []Upload) error
}

type ConcaterDataStore

type ConcaterDataStore interface {
	AsConcatableUpload(upload Upload) ConcatableUpload
}

ConcaterDataStore is the interface required to be implemented if the Concatenation extension should be enabled. Only in this case, the handler will parse and respect the Upload-Concat header.

type DataStore

type DataStore interface {
	// Create a new upload using the size as the file's length. The method must
	// return a unique id which is used to identify the upload. If no backend
	// (e.g. Riak) specifies the id, you may want to use the uid package to
	// generate one. The properties Size and MetaData will be filled.
	NewUpload(ctx context.Context, info FileInfo) (upload Upload, err error)

	// GetUpload fetches the upload with a given ID. If no such upload can be found,
	// ErrNotFound must be returned.
	GetUpload(ctx context.Context, id string) (upload Upload, err error)
}

DataStore is the base interface for storages to implement. It provides functions to create new uploads and fetch existing ones.

Note: the context value passed to all functions is not the request's context, but a similar context. See HookEvent.Context for more details.
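
A minimal, hedged sketch of an in-memory implementation. The names memoryStore and memoryUpload are illustrative assumptions and not part of this package; FileInfo, Upload and ErrNotFound refer to the types documented on this page, and the bytes, context, fmt, io and sync packages are assumed to be imported:

type memoryStore struct {
	mu      sync.Mutex
	uploads map[string]*memoryUpload
	nextID  int
}

type memoryUpload struct {
	info FileInfo
	data bytes.Buffer
}

func (s *memoryStore) NewUpload(ctx context.Context, info FileInfo) (Upload, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.uploads == nil {
		s.uploads = make(map[string]*memoryUpload)
	}
	s.nextID++
	info.ID = fmt.Sprintf("upload-%d", s.nextID) // this sketch lets the store pick the ID
	u := &memoryUpload{info: info}
	s.uploads[info.ID] = u
	return u, nil
}

func (s *memoryStore) GetUpload(ctx context.Context, id string) (Upload, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if u, ok := s.uploads[id]; ok {
		return u, nil
	}
	return nil, ErrNotFound // the DataStore contract requires ErrNotFound here
}

// memoryUpload implements the Upload interface documented further down this page.

func (u *memoryUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
	// A real store would honour offset; this sketch assumes sequential writes.
	n, err := io.Copy(&u.data, src)
	u.info.Offset += n
	return n, err
}

func (u *memoryUpload) GetInfo(ctx context.Context) (FileInfo, error) { return u.info, nil }

func (u *memoryUpload) GetReader(ctx context.Context) (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewReader(u.data.Bytes())), nil
}

func (u *memoryUpload) FinishUpload(ctx context.Context) error { return nil }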

type Error

type Error struct {
	ErrorCode    string
	Message      string
	HTTPResponse HTTPResponse
}

Error represents an error with the intent to be sent in the HTTP response to the client. Therefore, it contains an HTTPResponse in addition to an error code and an error message.

func NewError

func NewError(errCode string, message string, statusCode int) Error

NewError constructs a new Error object with the given error code and message. The corresponding HTTP response will have the provided status code and a body consisting of the error details. See the net/http package for standardized status codes.
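
For example, an application-specific error could be defined like this (the error code and message are hypothetical; net/http is assumed to be imported):

var ErrQuotaExceeded = NewError("ERR_QUOTA_EXCEEDED", "account storage quota exceeded", http.StatusForbidden)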

func (Error) Error

func (e Error) Error() string

func (Error) Is

func (e1 Error) Is(target error) bool

type ErrorsTotalMap

type ErrorsTotalMap struct {
	// contains filtered or unexported fields
}

ErrorsTotalMap stores the counters for the different HTTP errors.

func (*ErrorsTotalMap) Load

func (e *ErrorsTotalMap) Load() map[ErrorsTotalMapEntry]*uint64

Load retrieves the map of the counter pointers atomically

type ErrorsTotalMapEntry

type ErrorsTotalMapEntry struct {
	ErrorCode  string
	StatusCode int
}

type FileInfo

type FileInfo struct {
	// ID is the unique identifier of the upload resource.
	ID string
	// Total file size in bytes specified in the NewUpload call
	Size int64
	// Indicates whether the total file size is deferred until later
	SizeIsDeferred bool
	// Offset in bytes (zero-based)
	Offset   int64
	MetaData MetaData
	// Indicates that this is a partial upload which will later be used to form
	// a final upload by concatenation. Partial uploads should not be processed
	// when they are finished since they are only incomplete chunks of files.
	IsPartial bool
	// Indicates that this is a final upload
	IsFinal bool
	// If the upload is a final one (see IsFinal) this will be a non-empty
	// ordered slice containing the ids of the uploads of which the final upload
	// will consist after concatenation.
	PartialUploads []string
	// Storage contains information about where the data storage saves the upload,
	// for example a file path. The available values vary depending on what data
	// store is used. This map may also be nil.
	Storage map[string]string
	// contains filtered or unexported fields
}

FileInfo contains information about a single upload resource.

func (FileInfo) SetStopUpload

func (f FileInfo) SetStopUpload(stopUpload func(HTTPResponse))

func (FileInfo) StopUpload

func (f FileInfo) StopUpload(response HTTPResponse)

StopUpload interrupts a running upload from the server side. This means that the current request body is closed, so that the data store does not receive any more data. Furthermore, a response is sent to notify the client of the interruption and the upload is terminated (if supported by the data store), so the upload cannot be resumed anymore. The response to the client can optionally be modified by providing values in the HTTPResponse struct.
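
A hedged sketch of calling StopUpload from application code that has access to a HookEvent; the function name and size limit are hypothetical, net/http is assumed to be imported, and whether the stop callback is available depends on how the FileInfo was obtained:

func enforceLimit(event HookEvent) {
	if event.Upload.Offset > 1<<30 { // hypothetical 1 GiB limit
		event.Upload.StopUpload(HTTPResponse{
			StatusCode: http.StatusRequestEntityTooLarge,
			Body:       "upload exceeds the allowed size",
		})
	}
}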

type FileInfoChanges

type FileInfoChanges struct {
	// If ID is not empty, it will be passed to the data store, allowing
	// hooks to influence the upload ID. Be aware that a data store is not required to
	// respect a pre-defined upload ID and might overwrite or modify it. However,
	// all data stores in the github.com/tus/tusd package do respect pre-defined IDs.
	ID string

	// If MetaData is not nil, it replaces the entire user-defined meta data from
	// the upload creation request. You can add custom meta data fields this way
	// or ensure that only certain fields from the user-defined meta data are saved.
	// If you want to retain only specific entries from the user-defined meta data, you must
	// manually copy them into this MetaData field.
	// If you do not want to store any meta data, set this field to an empty map (`MetaData{}`).
	// If you want to keep the entire user-defined meta data, set this field to nil.
	MetaData MetaData

	// If Storage is not nil, it is passed to the data store to allow for minor adjustments
	// to the upload storage (e.g. destination file name). The details are specific for each
	// data store and should be looked up in their respective documentation.
	// Please be aware that this behavior is currently not supported by any data store in
	// the github.com/tus/tusd package.
	Storage map[string]string
}

FileInfoChanges collects changes that should be made to a FileInfo struct. This can be done using the PreUploadCreateCallback to modify certain properties before an upload is created. Properties which should not be modified (e.g. Size or Offset) are intentionally left out here.
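
A hedged sketch of building a FileInfoChanges, for example from a pre-create callback; the callback signature, the "filename" meta data key and the destination prefix are assumptions:

func preCreate(event HookEvent) (FileInfoChanges, error) {
	return FileInfoChanges{
		// Keep only the filename entry from the user-supplied meta data.
		MetaData: MetaData{"filename": event.Upload.MetaData["filename"]},

		// Hint about the destination; support for Storage adjustments depends
		// on the data store (see the field documentation above).
		Storage: map[string]string{"path": "tenant-42/" + event.Upload.MetaData["filename"]},
	}, nil
}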

type HTTPHeader

type HTTPHeader map[string]string

type HTTPRequest

type HTTPRequest struct {
	// Method is the HTTP method, e.g. POST or PATCH.
	Method string
	// URI is the full HTTP request URI, e.g. /files/fooo.
	URI string
	// RemoteAddr contains the network address that sent the request.
	RemoteAddr string
	// Header contains all HTTP headers as present in the HTTP request.
	Header http.Header
}

HTTPRequest contains basic details of an incoming HTTP request.

type HTTPResponse

type HTTPResponse struct {
	// StatusCode is the HTTP status code, e.g. 200 or 400.
	StatusCode int
	// Body is the response body.
	Body string
	// Header contains additional HTTP headers for the response.
	Header HTTPHeader
}

HTTPResponse contains basic details of an outgoing HTTP response.

func (HTTPResponse) MergeWith

func (resp1 HTTPResponse) MergeWith(resp2 HTTPResponse) HTTPResponse

MergeWith returns a copy of resp1, where non-default values from resp2 overwrite values from resp1.
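
A short illustration of the merge semantics; the exact handling of headers is an assumption based on the description above:

func mergeExample() HTTPResponse {
	base := HTTPResponse{StatusCode: 200, Body: "ok", Header: HTTPHeader{"X-Base": "1"}}
	patch := HTTPResponse{StatusCode: 409, Header: HTTPHeader{"X-Patch": "2"}}

	// StatusCode becomes 409 (non-default value from patch wins), Body stays
	// "ok" (patch.Body is the empty default), and Header is assumed to end up
	// containing both X-Base and X-Patch.
	return base.MergeWith(patch)
}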

func (HTTPResponse) WriteTo

func (resp HTTPResponse) WriteTo(w http.ResponseWriter)

WriteTo writes the HTTP response into w, as specified by the fields in resp.

type HookEvent

type HookEvent struct {
	// Context provides access to the context from the HTTP request. This context is
	// not the exact value as the request context from http.Request.Context() but
	// a similar context that retains the same values as the request context. In
	// addition, Context will be cancelled after a short delay when the request context
	// is done. This delay is controlled by Config.GracefulRequestCompletionTimeout.
	//
	// The reason is that we want stores to be able to continue processing a request after
	// its context has been cancelled. For example, assume a PATCH request is incoming. If
	// the end-user pauses the upload, the connection is closed causing the request context
	// to be cancelled immediately. However, we want the store to be able to save the last
	// few bytes that were transmitted before the request was aborted. To allow this, we
	// copy the request context but cancel it with a brief delay to give the data store
	// time to finish its operations.
	Context context.Context `json:"-"`
	// Upload contains information about the upload that caused this hook
	// to be fired.
	Upload FileInfo
	// HTTPRequest contains details about the HTTP request that reached
	// tusd.
	HTTPRequest HTTPRequest
}

HookEvent represents an event from tusd which can be handled by the application.

func NewHookEvent

func NewHookEvent(c *HttpContext, info FileInfo) HookEvent

type HttpContext

type HttpContext struct {
	context.Context

	// Body is nil by default and set by the user if the request body is consumed.
	Body *BodyReader

	// Log is the logger for this request. It gets extended with more properties as the
	// request progresses and is identified.
	Log *slog.Logger
	// contains filtered or unexported fields
}

HttpContext is a wrapper around context.Context that also carries the corresponding HTTP request and response writer, as well as an optional body reader.

func (HttpContext) GetCancel

func (c HttpContext) GetCancel() context.CancelCauseFunc

func (HttpContext) GetReq

func (c HttpContext) GetReq() *http.Request

func (HttpContext) GetRes

func (c HttpContext) GetRes() http.ResponseWriter

func (HttpContext) GetResC

func (c HttpContext) GetResC() *http.ResponseController

func (HttpContext) Value

func (c HttpContext) Value(key any) any

type LengthDeclarableUpload

type LengthDeclarableUpload interface {
	DeclareLength(ctx context.Context, length int64) error
}

type LengthDeferrerDataStore

type LengthDeferrerDataStore interface {
	AsLengthDeclarableUpload(upload Upload) LengthDeclarableUpload
}

LengthDeferrerDataStore is the interface that must be implemented if the creation-defer-length extension should be enabled. The extension enables a client to upload files when their total size is not yet known. Instead, the client must send the total size as soon as it becomes known.

type Lock

type Lock interface {
	// Lock attempts to obtain an exclusive lock for the upload specified
	// by its id.
	// If the lock can be acquired, it will return without error. The requestUnlock
	// callback is invoked when another caller attempts to create a lock. In this
	// case, the holder of the lock should attempt to release the lock as soon
	// as possible.
	// If the lock is already held, the holder's requestUnlock function will be
	// invoked to request the lock to be released. If the context is cancelled before
	// the lock can be acquired, ErrLockTimeout will be returned without acquiring
	// the lock.
	Lock(ctx context.Context, requestUnlock func()) error
	// Unlock releases an existing lock for the given upload.
	Unlock() error
}

Lock is the interface for a lock as returned from a Locker.

type Locker

type Locker interface {
	// NewLock creates a new unlocked lock object for the given upload ID.
	NewLock(id string) (Lock, error)
}

Locker is the interface required for custom lock persisting mechanisms. Common ways to store this information are in memory, on disk or using an external service, such as Redis. When multiple processes are attempting to access an upload, whether it be by reading or writing, a synchronization mechanism is required to prevent data corruption, especially to ensure correct offset values and the proper order of chunks inside a single upload.
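
A hedged sketch of how a caller might use a Locker; the helper name is illustrative, and the empty requestUnlock callback simply ignores release requests:

func withUploadLock(ctx context.Context, locker Locker, id string, fn func() error) error {
	lock, err := locker.NewLock(id)
	if err != nil {
		return err
	}

	// requestUnlock is invoked when another caller wants this lock; a real
	// handler would stop writing and release the lock promptly.
	if err := lock.Lock(ctx, func() {}); err != nil {
		return err // e.g. ErrLockTimeout if ctx ended before the lock was acquired
	}
	defer lock.Unlock()

	return fn()
}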

type MetaData

type MetaData map[string]string

type Metrics

type Metrics struct {
	// RequestsTotal counts the number of incoming requests per method
	RequestsTotal map[string]*uint64
	// ErrorsTotal counts the number of returned errors by their message
	ErrorsTotal       *ErrorsTotalMap
	BytesReceived     *uint64
	UploadsFinished   *uint64
	UploadsCreated    *uint64
	UploadsTerminated *uint64
}

Metrics provides numbers about the usage of the tusd handler. Since these may be accessed from multiple goroutines, it is necessary to read and modify them atomically using the functions exposed in the sync/atomic package, such as atomic.LoadUint64. In addition, the maps must not be modified to prevent data races.
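
A hedged sketch of reading the counters atomically, assuming NewMetrics pre-populates the per-method entries in RequestsTotal and that sync/atomic and fmt are imported:

func reportMetrics(m Metrics) {
	patches := atomic.LoadUint64(m.RequestsTotal["PATCH"])
	created := atomic.LoadUint64(m.UploadsCreated)
	received := atomic.LoadUint64(m.BytesReceived)

	fmt.Printf("PATCH requests: %d, uploads created: %d, bytes received: %d\n",
		patches, created, received)
}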

func NewMetrics

func NewMetrics() Metrics

func (Metrics) IncBytesReceived

func (m Metrics) IncBytesReceived(delta uint64)

IncBytesReceived increases the number of received bytes atomically by the specified number.

func (Metrics) IncErrorsTotal

func (m Metrics) IncErrorsTotal(err Error)

IncErrorsTotal increases the counter for this error atomically by one.

func (Metrics) IncRequestsTotal

func (m Metrics) IncRequestsTotal(method string)

IncRequestsTotal increases the counter for this request method atomically by one. The method must be one of GET, HEAD, POST, PATCH, DELETE.

func (Metrics) IncUploadsCreated

func (m Metrics) IncUploadsCreated()

IncUploadsCreated increases the counter for created uploads atomically by one.

func (Metrics) IncUploadsFinished

func (m Metrics) IncUploadsFinished()

IncUploadsFinished increases the counter for finished uploads atomically by one.

func (Metrics) IncUploadsTerminated

func (m Metrics) IncUploadsTerminated()

IncUploadsTerminated increases the counter for terminated uploads atomically by one.

type StoreComposer

type StoreComposer struct {
	Core DataStore

	UsesTerminater     bool
	Terminater         TerminaterDataStore
	UsesLocker         bool
	Locker             Locker
	UsesConcater       bool
	Concater           ConcaterDataStore
	UsesLengthDeferrer bool
	LengthDeferrer     LengthDeferrerDataStore
}

StoreComposer represents a composable data store. It consists of the core data store and optional extensions. Please consult the package's overview for a more detailed introduction in how to use this structure.

func NewStoreComposer

func NewStoreComposer() *StoreComposer

NewStoreComposer creates a new and empty store composer.
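
A hedged sketch of composing a store; the concrete core store and locker are placeholders supplied by the caller:

func compose(core DataStore, locker Locker) *StoreComposer {
	composer := NewStoreComposer()
	composer.UseCore(core)
	composer.UseLocker(locker)

	// Enable optional extensions only when the core store supports them.
	if t, ok := core.(TerminaterDataStore); ok {
		composer.UseTerminater(t)
	}
	if c, ok := core.(ConcaterDataStore); ok {
		composer.UseConcater(c)
	}
	if l, ok := core.(LengthDeferrerDataStore); ok {
		composer.UseLengthDeferrer(l)
	}
	return composer
}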

func (*StoreComposer) Capabilities

func (store *StoreComposer) Capabilities() string

Capabilities returns a string representing the provided extensions in a human-readable format meant for debugging.

func (*StoreComposer) UseConcater

func (store *StoreComposer) UseConcater(ext ConcaterDataStore)

func (*StoreComposer) UseCore

func (store *StoreComposer) UseCore(core DataStore)

UseCore will set the used core data store. If the argument is nil, the property will be unset.

func (*StoreComposer) UseLengthDeferrer

func (store *StoreComposer) UseLengthDeferrer(ext LengthDeferrerDataStore)

func (*StoreComposer) UseLocker

func (store *StoreComposer) UseLocker(ext Locker)

func (*StoreComposer) UseTerminater

func (store *StoreComposer) UseTerminater(ext TerminaterDataStore)

type TerminatableUpload

type TerminatableUpload interface {
	// Terminate an upload so any further requests to the upload resource will
	// return the ErrNotFound error.
	Terminate(ctx context.Context) error
}

type TerminaterDataStore

type TerminaterDataStore interface {
	AsTerminatableUpload(upload Upload) TerminatableUpload
}

TerminaterDataStore is the interface which must be implemented by DataStores if they want to receive DELETE requests using the Handler. If this interface is not implemented, no request handler for this method is attached.

type Upload

type Upload interface {
	// Write the chunk read from src into the file specified by the id at the
	// given offset. The handler will take care of validating the offset and
	// limiting the size of the src to not overflow the file's size.
	// The handler will also lock resources while they are written to ensure only one
	// write happens at a time.
	// The function call must return the number of bytes written.
	WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error)
	// Read the file information used to validate the offset and respond to HEAD
	// requests.
	GetInfo(ctx context.Context) (FileInfo, error)
	// GetReader returns an io.ReadCloser which allows iterating over the content of an
	// upload. It should attempt to provide a reader even if the upload has not
	// been finished yet but it's not required.
	GetReader(ctx context.Context) (io.ReadCloser, error)
	// FinishUpload is called once an entire upload has been completed. Data stores
	// can use it for additional operations, which may include but are not limited
	// to freeing unused resources or notifying other services. For example,
	// S3Store uses this step for removing a temporary object.
	FinishUpload(ctx context.Context) error
}
