[145] | 1 | package fasthttp
|
---|
| 2 |
|
---|
| 3 | import (
|
---|
| 4 | "bytes"
|
---|
| 5 | "errors"
|
---|
| 6 | "fmt"
|
---|
| 7 | "html"
|
---|
| 8 | "io"
|
---|
| 9 | "io/ioutil"
|
---|
| 10 | "mime"
|
---|
| 11 | "net/http"
|
---|
| 12 | "os"
|
---|
| 13 | "path/filepath"
|
---|
| 14 | "sort"
|
---|
| 15 | "strings"
|
---|
| 16 | "sync"
|
---|
| 17 | "time"
|
---|
| 18 |
|
---|
| 19 | "github.com/andybalholm/brotli"
|
---|
| 20 | "github.com/klauspost/compress/gzip"
|
---|
| 21 | "github.com/valyala/bytebufferpool"
|
---|
| 22 | )
|
---|
| 23 |
|
---|
// ServeFileBytesUncompressed returns HTTP response containing file contents
// from the given path.
//
// Directory contents is returned if path points to directory.
//
// Use ServeFileBytes if you want transparent compression, which saves
// network traffic when serving files with good compression ratio.
//
// See also RequestCtx.SendFileBytes.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) {
	// b2s converts []byte to string without allocation.
	ServeFileUncompressed(ctx, b2s(path))
}
|
---|
| 40 |
|
---|
// ServeFileUncompressed returns HTTP response containing file contents
// from the given path.
//
// Directory contents is returned if path points to directory.
//
// Use ServeFile if you want transparent compression, which saves
// network traffic when serving files with good compression ratio.
//
// See also RequestCtx.SendFile.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ServeFileUncompressed(ctx *RequestCtx, path string) {
	// Dropping Accept-Encoding makes ServeFile skip transparent
	// compression and return the raw file contents.
	ctx.Request.Header.DelBytes(strAcceptEncoding)
	ServeFile(ctx, path)
}
|
---|
| 58 |
|
---|
// ServeFileBytes returns HTTP response containing compressed file contents
// from the given path.
//
// HTTP response may contain uncompressed file contents in the following cases:
//
// * Missing 'Accept-Encoding: gzip' request header.
// * No write access to directory containing the file.
//
// Directory contents is returned if path points to directory.
//
// Use ServeFileBytesUncompressed if you don't need serving compressed
// file contents.
//
// See also RequestCtx.SendFileBytes.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ServeFileBytes(ctx *RequestCtx, path []byte) {
	// b2s converts []byte to string without allocation.
	ServeFile(ctx, b2s(path))
}
|
---|
| 80 |
|
---|
// ServeFile returns HTTP response containing compressed file contents
// from the given path.
//
// HTTP response may contain uncompressed file contents in the following cases:
//
// * Missing 'Accept-Encoding: gzip' request header.
// * No write access to directory containing the file.
//
// Directory contents is returned if path points to directory.
//
// Use ServeFileUncompressed if you don't need serving compressed file contents.
//
// See also RequestCtx.SendFile.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ServeFile(ctx *RequestCtx, path string) {
	// Lazily build the process-wide handler serving from the
	// filesystem root; shared by all ServeFile* calls.
	rootFSOnce.Do(func() {
		rootFSHandler = rootFS.NewRequestHandler()
	})
	if len(path) == 0 || path[0] != '/' {
		// extend relative path to absolute path
		hasTrailingSlash := len(path) > 0 && path[len(path)-1] == '/'
		var err error
		if path, err = filepath.Abs(path); err != nil {
			ctx.Logger().Printf("cannot resolve path %q to absolute file path: %s", path, err)
			ctx.Error("Internal Server Error", StatusInternalServerError)
			return
		}
		// filepath.Abs drops a trailing slash; restore it since it is
		// significant for directory handling downstream.
		if hasTrailingSlash {
			path += "/"
		}
	}
	ctx.Request.SetRequestURI(path)
	rootFSHandler(ctx)
}
|
---|
| 118 |
|
---|
var (
	// rootFSOnce guards one-time construction of rootFSHandler.
	rootFSOnce sync.Once
	// rootFS serves the whole filesystem; it backs ServeFile and
	// the related package-level helpers.
	rootFS = &FS{
		Root:               "/",
		GenerateIndexPages: true,
		Compress:           true,
		CompressBrotli:     true,
		AcceptByteRange:    true,
	}
	// rootFSHandler is lazily initialized from rootFS by ServeFile.
	rootFSHandler RequestHandler
)
|
---|
| 130 |
|
---|
// PathRewriteFunc must return new request path based on arbitrary ctx
// info such as ctx.Path().
//
// Path rewriter is used in FS for translating the current request
// to the local filesystem path relative to FS.Root.
//
// The returned path must not contain '/../' substrings due to security reasons,
// since such paths may refer files outside FS.Root.
//
// The returned path may refer to ctx members. For example, ctx.Path().
//
// See NewPathSlashesStripper, NewPathPrefixStripper and
// NewVHostPathRewriter for ready-made implementations.
type PathRewriteFunc func(ctx *RequestCtx) []byte
|
---|
| 142 |
|
---|
| 143 | // NewVHostPathRewriter returns path rewriter, which strips slashesCount
|
---|
| 144 | // leading slashes from the path and prepends the path with request's host,
|
---|
| 145 | // thus simplifying virtual hosting for static files.
|
---|
| 146 | //
|
---|
| 147 | // Examples:
|
---|
| 148 | //
|
---|
| 149 | // * host=foobar.com, slashesCount=0, original path="/foo/bar".
|
---|
| 150 | // Resulting path: "/foobar.com/foo/bar"
|
---|
| 151 | //
|
---|
| 152 | // * host=img.aaa.com, slashesCount=1, original path="/images/123/456.jpg"
|
---|
| 153 | // Resulting path: "/img.aaa.com/123/456.jpg"
|
---|
| 154 | //
|
---|
| 155 | func NewVHostPathRewriter(slashesCount int) PathRewriteFunc {
|
---|
| 156 | return func(ctx *RequestCtx) []byte {
|
---|
| 157 | path := stripLeadingSlashes(ctx.Path(), slashesCount)
|
---|
| 158 | host := ctx.Host()
|
---|
| 159 | if n := bytes.IndexByte(host, '/'); n >= 0 {
|
---|
| 160 | host = nil
|
---|
| 161 | }
|
---|
| 162 | if len(host) == 0 {
|
---|
| 163 | host = strInvalidHost
|
---|
| 164 | }
|
---|
| 165 | b := bytebufferpool.Get()
|
---|
| 166 | b.B = append(b.B, '/')
|
---|
| 167 | b.B = append(b.B, host...)
|
---|
| 168 | b.B = append(b.B, path...)
|
---|
| 169 | ctx.URI().SetPathBytes(b.B)
|
---|
| 170 | bytebufferpool.Put(b)
|
---|
| 171 |
|
---|
| 172 | return ctx.Path()
|
---|
| 173 | }
|
---|
| 174 | }
|
---|
| 175 |
|
---|
// strInvalidHost is substituted for empty or malformed Host headers
// by NewVHostPathRewriter.
var strInvalidHost = []byte("invalid-host")
|
---|
| 177 |
|
---|
// NewPathSlashesStripper returns path rewriter, which strips slashesCount
// leading slashes from the path.
//
// Examples:
//
//   * slashesCount = 0, original path: "/foo/bar", result: "/foo/bar"
//   * slashesCount = 1, original path: "/foo/bar", result: "/bar"
//   * slashesCount = 2, original path: "/foo/bar", result: ""
//
// The returned path rewriter may be used as FS.PathRewrite .
func NewPathSlashesStripper(slashesCount int) PathRewriteFunc {
	return func(ctx *RequestCtx) []byte {
		return stripLeadingSlashes(ctx.Path(), slashesCount)
	}
}
|
---|
| 193 |
|
---|
| 194 | // NewPathPrefixStripper returns path rewriter, which removes prefixSize bytes
|
---|
| 195 | // from the path prefix.
|
---|
| 196 | //
|
---|
| 197 | // Examples:
|
---|
| 198 | //
|
---|
| 199 | // * prefixSize = 0, original path: "/foo/bar", result: "/foo/bar"
|
---|
| 200 | // * prefixSize = 3, original path: "/foo/bar", result: "o/bar"
|
---|
| 201 | // * prefixSize = 7, original path: "/foo/bar", result: "r"
|
---|
| 202 | //
|
---|
| 203 | // The returned path rewriter may be used as FS.PathRewrite .
|
---|
| 204 | func NewPathPrefixStripper(prefixSize int) PathRewriteFunc {
|
---|
| 205 | return func(ctx *RequestCtx) []byte {
|
---|
| 206 | path := ctx.Path()
|
---|
| 207 | if len(path) >= prefixSize {
|
---|
| 208 | path = path[prefixSize:]
|
---|
| 209 | }
|
---|
| 210 | return path
|
---|
| 211 | }
|
---|
| 212 | }
|
---|
| 213 |
|
---|
// FS represents settings for request handler serving static files
// from the local filesystem.
//
// It is prohibited copying FS values. Create new values instead.
type FS struct {
	noCopy noCopy //nolint:unused,structcheck

	// Path to the root directory to serve files from.
	Root string

	// List of index file names to try opening during directory access.
	//
	// For example:
	//
	//     * index.html
	//     * index.htm
	//     * my-super-index.xml
	//
	// By default the list is empty.
	IndexNames []string

	// Index pages for directories without files matching IndexNames
	// are automatically generated if set.
	//
	// Directory index generation may be quite slow for directories
	// with many files (more than 1K), so it is discouraged enabling
	// index pages' generation for such directories.
	//
	// By default index pages aren't generated.
	GenerateIndexPages bool

	// Transparently compresses responses if set to true.
	//
	// The server tries minimizing CPU usage by caching compressed files.
	// It adds CompressedFileSuffix suffix to the original file name and
	// tries saving the resulting compressed file under the new file name.
	// So it is advisable to give the server write access to Root
	// and to all inner folders in order to minimize CPU usage when serving
	// compressed responses.
	//
	// Transparent compression is disabled by default.
	Compress bool

	// Uses brotli encoding and falls back to gzip in responses if set to true,
	// uses gzip if set to false.
	//
	// This value has sense only if Compress is set.
	//
	// Brotli encoding is disabled by default.
	CompressBrotli bool

	// Enables byte range requests if set to true.
	//
	// Byte range requests are disabled by default.
	AcceptByteRange bool

	// Path rewriting function.
	//
	// By default request path is not modified.
	PathRewrite PathRewriteFunc

	// PathNotFound fires when file is not found in filesystem
	// this functions tries to replace "Cannot open requested path"
	// server response giving to the programmer the control of server flow.
	//
	// By default PathNotFound returns
	// "Cannot open requested path"
	PathNotFound RequestHandler

	// Expiration duration for inactive file handlers.
	//
	// FSHandlerCacheDuration is used by default.
	CacheDuration time.Duration

	// Suffix to add to the name of cached compressed file.
	//
	// This value has sense only if Compress is set.
	//
	// FSCompressedFileSuffix is used by default.
	CompressedFileSuffix string

	// Suffixes list to add to compressedFileSuffix depending on encoding
	//
	// This value has sense only if Compress is set.
	//
	// FSCompressedFileSuffixes is used by default.
	CompressedFileSuffixes map[string]string

	// If CleanStop is set, the channel can be closed to stop the cleanup handlers
	// for the FS RequestHandlers created with NewRequestHandler.
	// NEVER close this channel while the handler is still being used!
	CleanStop chan struct{}

	// once guards one-time construction of h in NewRequestHandler.
	once sync.Once
	// h is the cached request handler built by initRequestHandler.
	h RequestHandler
}
|
---|
| 309 |
|
---|
// FSCompressedFileSuffix is the suffix FS adds to the original file names
// when trying to store compressed file under the new file name.
// See FS.Compress for details.
const FSCompressedFileSuffix = ".fasthttp.gz"
|
---|
| 314 |
|
---|
// FSCompressedFileSuffixes is the suffixes FS adds to the original file names
// depending on encoding when trying to store compressed file under the new
// file name. Keys are content-encoding names ("gzip", "br").
// See FS.Compress for details.
var FSCompressedFileSuffixes = map[string]string{
	"gzip": ".fasthttp.gz",
	"br":   ".fasthttp.br",
}
|
---|
| 322 |
|
---|
// FSHandlerCacheDuration is the default expiration duration for inactive
// file handlers opened by FS.
const FSHandlerCacheDuration = 10 * time.Second
|
---|
| 326 |
|
---|
| 327 | // FSHandler returns request handler serving static files from
|
---|
| 328 | // the given root folder.
|
---|
| 329 | //
|
---|
| 330 | // stripSlashes indicates how many leading slashes must be stripped
|
---|
| 331 | // from requested path before searching requested file in the root folder.
|
---|
| 332 | // Examples:
|
---|
| 333 | //
|
---|
| 334 | // * stripSlashes = 0, original path: "/foo/bar", result: "/foo/bar"
|
---|
| 335 | // * stripSlashes = 1, original path: "/foo/bar", result: "/bar"
|
---|
| 336 | // * stripSlashes = 2, original path: "/foo/bar", result: ""
|
---|
| 337 | //
|
---|
| 338 | // The returned request handler automatically generates index pages
|
---|
| 339 | // for directories without index.html.
|
---|
| 340 | //
|
---|
| 341 | // The returned handler caches requested file handles
|
---|
| 342 | // for FSHandlerCacheDuration.
|
---|
| 343 | // Make sure your program has enough 'max open files' limit aka
|
---|
| 344 | // 'ulimit -n' if root folder contains many files.
|
---|
| 345 | //
|
---|
| 346 | // Do not create multiple request handler instances for the same
|
---|
| 347 | // (root, stripSlashes) arguments - just reuse a single instance.
|
---|
| 348 | // Otherwise goroutine leak will occur.
|
---|
| 349 | func FSHandler(root string, stripSlashes int) RequestHandler {
|
---|
| 350 | fs := &FS{
|
---|
| 351 | Root: root,
|
---|
| 352 | IndexNames: []string{"index.html"},
|
---|
| 353 | GenerateIndexPages: true,
|
---|
| 354 | AcceptByteRange: true,
|
---|
| 355 | }
|
---|
| 356 | if stripSlashes > 0 {
|
---|
| 357 | fs.PathRewrite = NewPathSlashesStripper(stripSlashes)
|
---|
| 358 | }
|
---|
| 359 | return fs.NewRequestHandler()
|
---|
| 360 | }
|
---|
| 361 |
|
---|
// NewRequestHandler returns new request handler with the given FS settings.
//
// The returned handler caches requested file handles
// for FS.CacheDuration.
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if FS.Root folder contains many files.
//
// Do not create multiple request handlers from a single FS instance -
// just reuse a single request handler.
func (fs *FS) NewRequestHandler() RequestHandler {
	// initRequestHandler runs at most once per FS instance; every later
	// call returns the same cached handler.
	fs.once.Do(fs.initRequestHandler)
	return fs.h
}
|
---|
| 375 |
|
---|
| 376 | func (fs *FS) initRequestHandler() {
|
---|
| 377 | root := fs.Root
|
---|
| 378 |
|
---|
| 379 | // serve files from the current working directory if root is empty
|
---|
| 380 | if len(root) == 0 {
|
---|
| 381 | root = "."
|
---|
| 382 | }
|
---|
| 383 |
|
---|
| 384 | // strip trailing slashes from the root path
|
---|
| 385 | for len(root) > 0 && root[len(root)-1] == '/' {
|
---|
| 386 | root = root[:len(root)-1]
|
---|
| 387 | }
|
---|
| 388 |
|
---|
| 389 | cacheDuration := fs.CacheDuration
|
---|
| 390 | if cacheDuration <= 0 {
|
---|
| 391 | cacheDuration = FSHandlerCacheDuration
|
---|
| 392 | }
|
---|
| 393 |
|
---|
| 394 | compressedFileSuffixes := fs.CompressedFileSuffixes
|
---|
| 395 | if len(compressedFileSuffixes["br"]) == 0 || len(compressedFileSuffixes["gzip"]) == 0 ||
|
---|
| 396 | compressedFileSuffixes["br"] == compressedFileSuffixes["gzip"] {
|
---|
| 397 | compressedFileSuffixes = FSCompressedFileSuffixes
|
---|
| 398 | }
|
---|
| 399 |
|
---|
| 400 | if len(fs.CompressedFileSuffix) > 0 {
|
---|
| 401 | compressedFileSuffixes["gzip"] = fs.CompressedFileSuffix
|
---|
| 402 | compressedFileSuffixes["br"] = FSCompressedFileSuffixes["br"]
|
---|
| 403 | }
|
---|
| 404 |
|
---|
| 405 | h := &fsHandler{
|
---|
| 406 | root: root,
|
---|
| 407 | indexNames: fs.IndexNames,
|
---|
| 408 | pathRewrite: fs.PathRewrite,
|
---|
| 409 | generateIndexPages: fs.GenerateIndexPages,
|
---|
| 410 | compress: fs.Compress,
|
---|
| 411 | compressBrotli: fs.CompressBrotli,
|
---|
| 412 | pathNotFound: fs.PathNotFound,
|
---|
| 413 | acceptByteRange: fs.AcceptByteRange,
|
---|
| 414 | cacheDuration: cacheDuration,
|
---|
| 415 | compressedFileSuffixes: compressedFileSuffixes,
|
---|
| 416 | cache: make(map[string]*fsFile),
|
---|
| 417 | cacheBrotli: make(map[string]*fsFile),
|
---|
| 418 | cacheGzip: make(map[string]*fsFile),
|
---|
| 419 | }
|
---|
| 420 |
|
---|
| 421 | go func() {
|
---|
| 422 | var pendingFiles []*fsFile
|
---|
| 423 |
|
---|
| 424 | clean := func() {
|
---|
| 425 | pendingFiles = h.cleanCache(pendingFiles)
|
---|
| 426 | }
|
---|
| 427 |
|
---|
| 428 | if fs.CleanStop != nil {
|
---|
| 429 | t := time.NewTicker(cacheDuration / 2)
|
---|
| 430 | for {
|
---|
| 431 | select {
|
---|
| 432 | case <-t.C:
|
---|
| 433 | clean()
|
---|
| 434 | case _, stillOpen := <-fs.CleanStop:
|
---|
| 435 | // Ignore values send on the channel, only stop when it is closed.
|
---|
| 436 | if !stillOpen {
|
---|
| 437 | t.Stop()
|
---|
| 438 | return
|
---|
| 439 | }
|
---|
| 440 | }
|
---|
| 441 | }
|
---|
| 442 | }
|
---|
| 443 | for {
|
---|
| 444 | time.Sleep(cacheDuration / 2)
|
---|
| 445 | clean()
|
---|
| 446 | }
|
---|
| 447 | }()
|
---|
| 448 |
|
---|
| 449 | fs.h = h.handleRequest
|
---|
| 450 | }
|
---|
| 451 |
|
---|
// fsHandler is the immutable-configuration core behind an FS request
// handler, plus its mutable file-handle caches.
type fsHandler struct {
	root                   string            // normalized FS.Root (no trailing slashes)
	indexNames             []string          // see FS.IndexNames
	pathRewrite            PathRewriteFunc   // optional request-path rewriter
	pathNotFound           RequestHandler    // optional 404 handler
	generateIndexPages     bool              // see FS.GenerateIndexPages
	compress               bool              // see FS.Compress
	compressBrotli         bool              // see FS.CompressBrotli
	acceptByteRange        bool              // see FS.AcceptByteRange
	cacheDuration          time.Duration     // eviction age for cached handles
	compressedFileSuffixes map[string]string // encoding name -> cache-file suffix

	// Per-encoding caches of open file handles, keyed by request path.
	// All three maps and every fsFile.readersCount are guarded by cacheLock.
	cache       map[string]*fsFile
	cacheBrotli map[string]*fsFile
	cacheGzip   map[string]*fsFile
	cacheLock   sync.Mutex

	// Pool of reusable fsSmallFileReader objects.
	smallFileReaderPool sync.Pool
}
|
---|
| 471 |
|
---|
// fsFile is a cached open file (or generated directory index) served by
// fsHandler.
type fsFile struct {
	h *fsHandler // owning handler (provides cacheLock and reader pool)
	f *os.File   // open file handle; nil when dirIndex is used instead

	// dirIndex holds a generated directory index page; when non-empty the
	// file contents come from memory rather than from f.
	dirIndex      []byte
	contentType   string
	contentLength int
	compressed    bool // whether the contents are stored compressed

	lastModified    time.Time
	lastModifiedStr []byte // pre-rendered Last-Modified header value

	t            time.Time // last-access time used by the cache cleaner
	readersCount int       // active readers; guarded by h.cacheLock

	// Pool of idle bigFileReader instances for this file; each holds its
	// own *os.File so concurrent readers don't share a file offset.
	bigFiles     []*bigFileReader
	bigFilesLock sync.Mutex
}
|
---|
| 489 |
|
---|
// NewReader returns a reader positioned at the start of the file contents
// (or of the generated directory index). Big files get a sendfile-capable
// bigFileReader; everything else a pooled fsSmallFileReader.
//
// The returned reader must be closed so the readers counter is decremented.
func (ff *fsFile) NewReader() (io.Reader, error) {
	if ff.isBig() {
		r, err := ff.bigFileReader()
		if err != nil {
			// No reader was handed out, so roll back the readers counter
			// (incremented by the caller before obtaining the reader).
			ff.decReadersCount()
		}
		return r, err
	}
	return ff.smallFileReader(), nil
}
|
---|
| 500 |
|
---|
| 501 | func (ff *fsFile) smallFileReader() io.Reader {
|
---|
| 502 | v := ff.h.smallFileReaderPool.Get()
|
---|
| 503 | if v == nil {
|
---|
| 504 | v = &fsSmallFileReader{}
|
---|
| 505 | }
|
---|
| 506 | r := v.(*fsSmallFileReader)
|
---|
| 507 | r.ff = ff
|
---|
| 508 | r.endPos = ff.contentLength
|
---|
| 509 | if r.startPos > 0 {
|
---|
| 510 | panic("BUG: fsSmallFileReader with non-nil startPos found in the pool")
|
---|
| 511 | }
|
---|
| 512 | return r
|
---|
| 513 | }
|
---|
| 514 |
|
---|
// maxSmallFileSize is the threshold above which files are served via
// bigFileReader (sendfile); files at or below it go through the pooled
// small-file reader.
const maxSmallFileSize = 2 * 4096
|
---|
| 517 |
|
---|
// isBig reports whether the file should be streamed via bigFileReader.
// Generated directory indexes are always "small" since they already live
// in memory (dirIndex).
func (ff *fsFile) isBig() bool {
	return ff.contentLength > maxSmallFileSize && len(ff.dirIndex) == 0
}
|
---|
| 521 |
|
---|
// bigFileReader returns a reader for a "big" file, reusing an idle pooled
// bigFileReader when one is available and otherwise opening a fresh
// handle to the same file by name.
func (ff *fsFile) bigFileReader() (io.Reader, error) {
	if ff.f == nil {
		panic("BUG: ff.f must be non-nil in bigFileReader")
	}

	var r io.Reader

	// Try to reuse a previously returned reader; each one owns its own
	// *os.File so concurrent readers don't share a file offset.
	ff.bigFilesLock.Lock()
	n := len(ff.bigFiles)
	if n > 0 {
		r = ff.bigFiles[n-1]
		ff.bigFiles = ff.bigFiles[:n-1]
	}
	ff.bigFilesLock.Unlock()

	if r != nil {
		return r, nil
	}

	// No idle reader available - open another handle to the same path.
	f, err := os.Open(ff.f.Name())
	if err != nil {
		return nil, fmt.Errorf("cannot open already opened file: %w", err)
	}
	return &bigFileReader{
		f:  f,
		ff: ff,
		r:  f,
	}, nil
}
|
---|
| 551 |
|
---|
// Release closes the underlying file handle and, for big files, every
// idle pooled bigFileReader handle. The cache cleaner only calls this
// once readersCount has dropped to zero.
func (ff *fsFile) Release() {
	if ff.f != nil {
		ff.f.Close()

		if ff.isBig() {
			ff.bigFilesLock.Lock()
			for _, r := range ff.bigFiles {
				r.f.Close()
			}
			ff.bigFilesLock.Unlock()
		}
	}
}
|
---|
| 565 |
|
---|
// decReadersCount decrements the active-readers counter under the owning
// handler's cache lock (the same lock that guards increments in
// handleRequest).
func (ff *fsFile) decReadersCount() {
	ff.h.cacheLock.Lock()
	ff.readersCount--
	if ff.readersCount < 0 {
		panic("BUG: negative fsFile.readersCount!")
	}
	ff.h.cacheLock.Unlock()
}
|
---|
| 574 |
|
---|
// bigFileReader attempts to trigger sendfile
// for sending big files over the wire.
type bigFileReader struct {
	f  *os.File
	ff *fsFile   // owning cached file; readers are pooled back into it
	r  io.Reader // current source: f, or &lr when a byte range is active
	// lr limits reads to the requested byte range (see UpdateByteRange);
	// embedded by value to avoid a per-range allocation.
	lr io.LimitedReader
}
|
---|
| 583 |
|
---|
| 584 | func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error {
|
---|
| 585 | if _, err := r.f.Seek(int64(startPos), 0); err != nil {
|
---|
| 586 | return err
|
---|
| 587 | }
|
---|
| 588 | r.r = &r.lr
|
---|
| 589 | r.lr.R = r.f
|
---|
| 590 | r.lr.N = int64(endPos - startPos + 1)
|
---|
| 591 | return nil
|
---|
| 592 | }
|
---|
| 593 |
|
---|
// Read forwards to the current source: the raw file, or the
// LimitedReader when a byte range is active.
func (r *bigFileReader) Read(p []byte) (int, error) {
	return r.r.Read(p)
}
|
---|
| 597 |
|
---|
// WriteTo streams the file contents to w. When w implements io.ReaderFrom
// (e.g. a TCP connection), copying is delegated to it so the OS sendfile
// fast path can kick in.
func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) {
	if rf, ok := w.(io.ReaderFrom); ok {
		// fast path. Sendfile must be triggered
		return rf.ReadFrom(r.r)
	}

	// slow path
	return copyZeroAlloc(w, r.r)
}
|
---|
| 607 |
|
---|
| 608 | func (r *bigFileReader) Close() error {
|
---|
| 609 | r.r = r.f
|
---|
| 610 | n, err := r.f.Seek(0, 0)
|
---|
| 611 | if err == nil {
|
---|
| 612 | if n != 0 {
|
---|
| 613 | panic("BUG: File.Seek(0,0) returned (non-zero, nil)")
|
---|
| 614 | }
|
---|
| 615 |
|
---|
| 616 | ff := r.ff
|
---|
| 617 | ff.bigFilesLock.Lock()
|
---|
| 618 | ff.bigFiles = append(ff.bigFiles, r)
|
---|
| 619 | ff.bigFilesLock.Unlock()
|
---|
| 620 | } else {
|
---|
| 621 | r.f.Close()
|
---|
| 622 | }
|
---|
| 623 | r.ff.decReadersCount()
|
---|
| 624 | return err
|
---|
| 625 | }
|
---|
| 626 |
|
---|
// fsSmallFileReader serves small files (and in-memory directory indexes)
// via ReadAt-style positioned reads, so many readers can share a single
// *os.File without fighting over the file offset. Instances are pooled in
// fsHandler.smallFileReaderPool.
type fsSmallFileReader struct {
	ff       *fsFile
	startPos int // current read position
	endPos   int // exclusive end of the served range
}
|
---|
| 632 |
|
---|
// Close decrements the owning fsFile's readers counter, resets the reader
// to its zero state and returns it to the pool. Always returns nil.
func (r *fsSmallFileReader) Close() error {
	ff := r.ff
	ff.decReadersCount()
	// Reset before pooling; smallFileReader panics on a pooled reader
	// with leftover state.
	r.ff = nil
	r.startPos = 0
	r.endPos = 0
	ff.h.smallFileReaderPool.Put(r)
	return nil
}
|
---|
| 642 |
|
---|
// UpdateByteRange restricts the reader to the inclusive byte range
// [startPos, endPos]; endPos is stored exclusive internally. Never fails.
func (r *fsSmallFileReader) UpdateByteRange(startPos, endPos int) error {
	r.startPos = startPos
	r.endPos = endPos + 1
	return nil
}
|
---|
| 648 |
|
---|
// Read copies up to len(p) bytes of the remaining range into p, reading
// from the open file via ReadAt (which leaves the shared file offset
// untouched) or from the in-memory directory index.
func (r *fsSmallFileReader) Read(p []byte) (int, error) {
	tailLen := r.endPos - r.startPos
	if tailLen <= 0 {
		return 0, io.EOF
	}
	// Never read past the end of the served range.
	if len(p) > tailLen {
		p = p[:tailLen]
	}

	ff := r.ff
	if ff.f != nil {
		n, err := ff.f.ReadAt(p, int64(r.startPos))
		r.startPos += n
		return n, err
	}

	// Directory index: serve straight from the in-memory buffer.
	n := copy(p, ff.dirIndex[r.startPos:])
	r.startPos += n
	return n, nil
}
|
---|
| 669 |
|
---|
// WriteTo writes the remaining range to w without advancing startPos
// (except indirectly via rf.ReadFrom(r), which goes through Read).
// Directory indexes are written in one call; file contents are copied
// through a pooled buffer using positioned reads.
func (r *fsSmallFileReader) WriteTo(w io.Writer) (int64, error) {
	ff := r.ff

	var n int
	var err error
	if ff.f == nil {
		// In-memory directory index: a single Write covers the range.
		n, err = w.Write(ff.dirIndex[r.startPos:r.endPos])
		return int64(n), err
	}

	if rf, ok := w.(io.ReaderFrom); ok {
		// Let the writer pull data through our Read method.
		return rf.ReadFrom(r)
	}

	// Manual copy loop over the [startPos, endPos) range using ReadAt,
	// so the shared file offset is never touched.
	curPos := r.startPos
	bufv := copyBufPool.Get()
	buf := bufv.([]byte)
	for err == nil {
		tailLen := r.endPos - curPos
		if tailLen <= 0 {
			break
		}
		if len(buf) > tailLen {
			buf = buf[:tailLen]
		}
		n, err = ff.f.ReadAt(buf, int64(curPos))
		nw, errw := w.Write(buf[:n])
		curPos += nw
		if errw == nil && nw != n {
			panic("BUG: Write(p) returned (n, nil), where n != len(p)")
		}
		// Preserve the first error encountered (read error wins over
		// a later write error within the same iteration).
		if err == nil {
			err = errw
		}
	}
	copyBufPool.Put(bufv)

	// Reaching the end of the file is a normal termination condition.
	if err == io.EOF {
		err = nil
	}
	return int64(curPos - r.startPos), err
}
|
---|
| 712 |
|
---|
// cleanCache evicts stale entries from all three file-handle caches.
// Files that still have active readers are carried over in pendingFiles
// and retried on the next run; the rest are released outside the lock.
// Returns the updated pendingFiles slice.
func (h *fsHandler) cleanCache(pendingFiles []*fsFile) []*fsFile {
	var filesToRelease []*fsFile

	h.cacheLock.Lock()

	// Close files which couldn't be closed before due to non-zero
	// readers count on the previous run.
	var remainingFiles []*fsFile
	for _, ff := range pendingFiles {
		if ff.readersCount > 0 {
			remainingFiles = append(remainingFiles, ff)
		} else {
			filesToRelease = append(filesToRelease, ff)
		}
	}
	pendingFiles = remainingFiles

	pendingFiles, filesToRelease = cleanCacheNolock(h.cache, pendingFiles, filesToRelease, h.cacheDuration)
	pendingFiles, filesToRelease = cleanCacheNolock(h.cacheBrotli, pendingFiles, filesToRelease, h.cacheDuration)
	pendingFiles, filesToRelease = cleanCacheNolock(h.cacheGzip, pendingFiles, filesToRelease, h.cacheDuration)

	h.cacheLock.Unlock()

	// Release outside the lock: closing file handles may be slow and
	// needs no cache-state protection once the entries are unlinked.
	for _, ff := range filesToRelease {
		ff.Release()
	}

	return pendingFiles
}
|
---|
| 742 |
|
---|
| 743 | func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*fsFile, cacheDuration time.Duration) ([]*fsFile, []*fsFile) {
|
---|
| 744 | t := time.Now()
|
---|
| 745 | for k, ff := range cache {
|
---|
| 746 | if t.Sub(ff.t) > cacheDuration {
|
---|
| 747 | if ff.readersCount > 0 {
|
---|
| 748 | // There are pending readers on stale file handle,
|
---|
| 749 | // so we cannot close it. Put it into pendingFiles
|
---|
| 750 | // so it will be closed later.
|
---|
| 751 | pendingFiles = append(pendingFiles, ff)
|
---|
| 752 | } else {
|
---|
| 753 | filesToRelease = append(filesToRelease, ff)
|
---|
| 754 | }
|
---|
| 755 | delete(cache, k)
|
---|
| 756 | }
|
---|
| 757 | }
|
---|
| 758 | return pendingFiles, filesToRelease
|
---|
| 759 | }
|
---|
| 760 |
|
---|
// handleRequest serves a single static-file request: it resolves and
// sanitizes the request path, consults the per-encoding file cache, opens
// (and possibly compresses) the file on a cache miss, and writes the
// response honoring If-Modified-Since, HEAD and 'Range' semantics.
func (h *fsHandler) handleRequest(ctx *RequestCtx) {
	var path []byte
	if h.pathRewrite != nil {
		path = h.pathRewrite(ctx)
	} else {
		path = ctx.Path()
	}
	// Remember whether the client asked for a directory ('/'-terminated path)
	// before the trailing slashes are stripped for cache lookup.
	hasTrailingSlash := len(path) > 0 && path[len(path)-1] == '/'
	path = stripTrailingSlashes(path)

	// NUL bytes are never valid in a filesystem path; reject early.
	if n := bytes.IndexByte(path, 0); n >= 0 {
		ctx.Logger().Printf("cannot serve path with nil byte at position %d: %q", n, path)
		ctx.Error("Are you a hacker?", StatusBadRequest)
		return
	}
	if h.pathRewrite != nil {
		// There is no need to check for '/../' if path = ctx.Path(),
		// since ctx.Path must normalize and sanitize the path.

		if n := bytes.Index(path, strSlashDotDotSlash); n >= 0 {
			ctx.Logger().Printf("cannot serve path with '/../' at position %d due to security reasons: %q", n, path)
			ctx.Error("Internal Server Error", StatusInternalServerError)
			return
		}
	}

	// Choose the cache matching the negotiated content encoding. Range
	// requests are never served compressed here: byte offsets refer to the
	// uncompressed representation.
	mustCompress := false
	fileCache := h.cache
	fileEncoding := ""
	byteRange := ctx.Request.Header.peek(strRange)
	if len(byteRange) == 0 && h.compress {
		if h.compressBrotli && ctx.Request.Header.HasAcceptEncodingBytes(strBr) {
			mustCompress = true
			fileCache = h.cacheBrotli
			fileEncoding = "br"
		} else if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
			mustCompress = true
			fileCache = h.cacheGzip
			fileEncoding = "gzip"
		}
	}

	// Fast path: cached file. readersCount is incremented under the lock so
	// a concurrent cache cleaner cannot close the file while it is in use.
	h.cacheLock.Lock()
	ff, ok := fileCache[string(path)]
	if ok {
		ff.readersCount++
	}
	h.cacheLock.Unlock()

	if !ok {
		// Cache miss: open (and possibly compress) the file from disk.
		pathStr := string(path)
		filePath := h.root + pathStr
		var err error
		ff, err = h.openFSFile(filePath, mustCompress, fileEncoding)
		if mustCompress && err == errNoCreatePermission {
			// Cannot write the compressed variant next to the original;
			// degrade gracefully to serving the uncompressed file.
			ctx.Logger().Printf("insufficient permissions for saving compressed file for %q. Serving uncompressed file. "+
				"Allow write access to the directory with this file in order to improve fasthttp performance", filePath)
			mustCompress = false
			ff, err = h.openFSFile(filePath, mustCompress, fileEncoding)
		}
		if err == errDirIndexRequired {
			// Directories are only served at '/'-terminated URLs; redirect
			// otherwise so relative links on the index page resolve correctly.
			if !hasTrailingSlash {
				ctx.RedirectBytes(append(path, '/'), StatusFound)
				return
			}
			ff, err = h.openIndexFile(ctx, filePath, mustCompress, fileEncoding)
			if err != nil {
				ctx.Logger().Printf("cannot open dir index %q: %s", filePath, err)
				ctx.Error("Directory index is forbidden", StatusForbidden)
				return
			}
		} else if err != nil {
			ctx.Logger().Printf("cannot open file %q: %s", filePath, err)
			if h.pathNotFound == nil {
				ctx.Error("Cannot open requested path", StatusNotFound)
			} else {
				ctx.SetStatusCode(StatusNotFound)
				h.pathNotFound(ctx)
			}
			return
		}

		// Publish the freshly opened file, unless another goroutine won the
		// race and already cached one for the same path.
		h.cacheLock.Lock()
		ff1, ok := fileCache[pathStr]
		if !ok {
			fileCache[pathStr] = ff
			ff.readersCount++
		} else {
			ff1.readersCount++
		}
		h.cacheLock.Unlock()

		if ok {
			// The file has been already opened by another
			// goroutine, so close the current file and use
			// the file opened by another goroutine instead.
			ff.Release()
			ff = ff1
		}
	}

	if !ctx.IfModifiedSince(ff.lastModified) {
		// Client's copy is up to date; drop our reader reference and reply 304.
		ff.decReadersCount()
		ctx.NotModified()
		return
	}

	r, err := ff.NewReader()
	if err != nil {
		ctx.Logger().Printf("cannot obtain file reader for path=%q: %s", path, err)
		ctx.Error("Internal Server Error", StatusInternalServerError)
		return
	}

	hdr := &ctx.Response.Header
	if ff.compressed {
		if fileEncoding == "br" {
			hdr.SetCanonical(strContentEncoding, strBr)
		} else if fileEncoding == "gzip" {
			hdr.SetCanonical(strContentEncoding, strGzip)
		}
	}

	statusCode := StatusOK
	contentLength := ff.contentLength
	if h.acceptByteRange {
		hdr.SetCanonical(strAcceptRanges, strBytes)
		if len(byteRange) > 0 {
			startPos, endPos, err := ParseByteRange(byteRange, contentLength)
			if err != nil {
				// The reader must be closed manually on every early return.
				r.(io.Closer).Close()
				ctx.Logger().Printf("cannot parse byte range %q for path=%q: %s", byteRange, path, err)
				ctx.Error("Range Not Satisfiable", StatusRequestedRangeNotSatisfiable)
				return
			}

			if err = r.(byteRangeUpdater).UpdateByteRange(startPos, endPos); err != nil {
				r.(io.Closer).Close()
				ctx.Logger().Printf("cannot seek byte range %q for path=%q: %s", byteRange, path, err)
				ctx.Error("Internal Server Error", StatusInternalServerError)
				return
			}

			hdr.SetContentRange(startPos, endPos, contentLength)
			contentLength = endPos - startPos + 1
			statusCode = StatusPartialContent
		}
	}

	hdr.SetCanonical(strLastModified, ff.lastModifiedStr)
	if !ctx.IsHead() {
		ctx.SetBodyStream(r, contentLength)
	} else {
		// HEAD: send headers (including Content-Length) but no body, and
		// close the unused reader immediately.
		ctx.Response.ResetBody()
		ctx.Response.SkipBody = true
		ctx.Response.Header.SetContentLength(contentLength)
		if rc, ok := r.(io.Closer); ok {
			if err := rc.Close(); err != nil {
				ctx.Logger().Printf("cannot close file reader: %s", err)
				ctx.Error("Internal Server Error", StatusInternalServerError)
				return
			}
		}
	}
	// Only fall back to the sniffed/extension-based content type when the
	// handler (e.g. pathRewrite or pathNotFound) did not set one explicitly.
	hdr.noDefaultContentType = true
	if len(hdr.ContentType()) == 0 {
		ctx.SetContentType(ff.contentType)
	}
	ctx.SetStatusCode(statusCode)
}
|
---|
| 931 |
|
---|
// byteRangeUpdater restricts a reader to the given inclusive byte range.
// handleRequest type-asserts the reader returned by fsFile.NewReader to
// this interface when serving 'Range' requests.
type byteRangeUpdater interface {
	UpdateByteRange(startPos, endPos int) error
}
|
---|
| 935 |
|
---|
| 936 | // ParseByteRange parses 'Range: bytes=...' header value.
|
---|
| 937 | //
|
---|
| 938 | // It follows https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 .
|
---|
| 939 | func ParseByteRange(byteRange []byte, contentLength int) (startPos, endPos int, err error) {
|
---|
| 940 | b := byteRange
|
---|
| 941 | if !bytes.HasPrefix(b, strBytes) {
|
---|
| 942 | return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", byteRange, strBytes)
|
---|
| 943 | }
|
---|
| 944 |
|
---|
| 945 | b = b[len(strBytes):]
|
---|
| 946 | if len(b) == 0 || b[0] != '=' {
|
---|
| 947 | return 0, 0, fmt.Errorf("missing byte range in %q", byteRange)
|
---|
| 948 | }
|
---|
| 949 | b = b[1:]
|
---|
| 950 |
|
---|
| 951 | n := bytes.IndexByte(b, '-')
|
---|
| 952 | if n < 0 {
|
---|
| 953 | return 0, 0, fmt.Errorf("missing the end position of byte range in %q", byteRange)
|
---|
| 954 | }
|
---|
| 955 |
|
---|
| 956 | if n == 0 {
|
---|
| 957 | v, err := ParseUint(b[n+1:])
|
---|
| 958 | if err != nil {
|
---|
| 959 | return 0, 0, err
|
---|
| 960 | }
|
---|
| 961 | startPos := contentLength - v
|
---|
| 962 | if startPos < 0 {
|
---|
| 963 | startPos = 0
|
---|
| 964 | }
|
---|
| 965 | return startPos, contentLength - 1, nil
|
---|
| 966 | }
|
---|
| 967 |
|
---|
| 968 | if startPos, err = ParseUint(b[:n]); err != nil {
|
---|
| 969 | return 0, 0, err
|
---|
| 970 | }
|
---|
| 971 | if startPos >= contentLength {
|
---|
| 972 | return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", contentLength-1, byteRange)
|
---|
| 973 | }
|
---|
| 974 |
|
---|
| 975 | b = b[n+1:]
|
---|
| 976 | if len(b) == 0 {
|
---|
| 977 | return startPos, contentLength - 1, nil
|
---|
| 978 | }
|
---|
| 979 |
|
---|
| 980 | if endPos, err = ParseUint(b); err != nil {
|
---|
| 981 | return 0, 0, err
|
---|
| 982 | }
|
---|
| 983 | if endPos >= contentLength {
|
---|
| 984 | endPos = contentLength - 1
|
---|
| 985 | }
|
---|
| 986 | if endPos < startPos {
|
---|
| 987 | return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. byte range %q", byteRange)
|
---|
| 988 | }
|
---|
| 989 | return startPos, endPos, nil
|
---|
| 990 | }
|
---|
| 991 |
|
---|
| 992 | func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
|
---|
| 993 | for _, indexName := range h.indexNames {
|
---|
| 994 | indexFilePath := dirPath + "/" + indexName
|
---|
| 995 | ff, err := h.openFSFile(indexFilePath, mustCompress, fileEncoding)
|
---|
| 996 | if err == nil {
|
---|
| 997 | return ff, nil
|
---|
| 998 | }
|
---|
| 999 | if !os.IsNotExist(err) {
|
---|
| 1000 | return nil, fmt.Errorf("cannot open file %q: %w", indexFilePath, err)
|
---|
| 1001 | }
|
---|
| 1002 | }
|
---|
| 1003 |
|
---|
| 1004 | if !h.generateIndexPages {
|
---|
| 1005 | return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath)
|
---|
| 1006 | }
|
---|
| 1007 |
|
---|
| 1008 | return h.createDirIndex(ctx.URI(), dirPath, mustCompress, fileEncoding)
|
---|
| 1009 | }
|
---|
| 1010 |
|
---|
var (
	// errDirIndexRequired is returned by openFSFile/compressAndOpenFSFile when
	// the requested path is a directory, telling the caller to serve an index.
	errDirIndexRequired = errors.New("directory index required")
	// errNoCreatePermission is returned when the compressed variant of a file
	// cannot be created next to the original due to missing write permissions.
	errNoCreatePermission = errors.New("no 'create file' permissions")
)
|
---|
| 1015 |
|
---|
// createDirIndex builds an in-memory HTML listing of dirPath (optionally
// compressed with fileEncoding) and returns it as a directory-index fsFile
// backed by the generated buffer rather than an open file.
func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
	w := &bytebufferpool.ByteBuffer{}

	// All user-controlled strings are HTML-escaped before being rendered.
	basePathEscaped := html.EscapeString(string(base.Path()))
	fmt.Fprintf(w, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", basePathEscaped)
	fmt.Fprintf(w, "<h1>%s</h1>", basePathEscaped)
	fmt.Fprintf(w, "<ul>")

	// Add a parent-directory link unless we are already at the root ("/").
	if len(basePathEscaped) > 1 {
		var parentURI URI
		base.CopyTo(&parentURI)
		parentURI.Update(string(base.Path()) + "/..")
		parentPathEscaped := html.EscapeString(string(parentURI.Path()))
		fmt.Fprintf(w, `<li><a href="%s" class="dir">..</a></li>`, parentPathEscaped)
	}

	f, err := os.Open(dirPath)
	if err != nil {
		return nil, err
	}

	// Readdir(0) reads all directory entries in one call.
	fileinfos, err := f.Readdir(0)
	f.Close()
	if err != nil {
		return nil, err
	}

	fm := make(map[string]os.FileInfo, len(fileinfos))
	filenames := make([]string, 0, len(fileinfos))
nestedContinue:
	for _, fi := range fileinfos {
		name := fi.Name()
		for _, cfs := range h.compressedFileSuffixes {
			if strings.HasSuffix(name, cfs) {
				// Do not show compressed files on index page.
				continue nestedContinue
			}
		}
		fm[name] = fi
		filenames = append(filenames, name)
	}

	var u URI
	base.CopyTo(&u)
	u.Update(string(u.Path()) + "/")

	// Render entries in deterministic (sorted) order. Each u.Update(name)
	// replaces the last path segment of the previous iteration.
	sort.Strings(filenames)
	for _, name := range filenames {
		u.Update(name)
		pathEscaped := html.EscapeString(string(u.Path()))
		fi := fm[name]
		auxStr := "dir"
		className := "dir"
		if !fi.IsDir() {
			auxStr = fmt.Sprintf("file, %d bytes", fi.Size())
			className = "file"
		}
		fmt.Fprintf(w, `<li><a href="%s" class="%s">%s</a>, %s, last modified %s</li>`,
			pathEscaped, className, html.EscapeString(name), auxStr, fsModTime(fi.ModTime()))
	}

	fmt.Fprintf(w, "</ul></body></html>")

	// Compress the finished page in one shot when the client negotiated
	// a compressed encoding.
	if mustCompress {
		var zbuf bytebufferpool.ByteBuffer
		if fileEncoding == "br" {
			zbuf.B = AppendBrotliBytesLevel(zbuf.B, w.B, CompressDefaultCompression)
		} else if fileEncoding == "gzip" {
			zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression)
		}
		w = &zbuf
	}

	dirIndex := w.B
	lastModified := time.Now()
	ff := &fsFile{
		h:               h,
		dirIndex:        dirIndex,
		contentType:     "text/html; charset=utf-8",
		contentLength:   len(dirIndex),
		compressed:      mustCompress,
		lastModified:    lastModified,
		lastModifiedStr: AppendHTTPDate(nil, lastModified),

		t: lastModified,
	}
	return ff, nil
}
|
---|
| 1104 |
|
---|
const (
	// fsMinCompressRatio is the compressed/uncompressed size ratio threshold
	// passed to isFileCompressible: files that do not compress at least this
	// well are served uncompressed.
	fsMinCompressRatio = 0.8
	// fsMaxCompressibleFileSize caps (at 8MiB) the size of files for which
	// a compressed variant is created; bigger files are served as-is.
	fsMaxCompressibleFileSize = 8 * 1024 * 1024
)
|
---|
| 1109 |
|
---|
| 1110 | func (h *fsHandler) compressAndOpenFSFile(filePath string, fileEncoding string) (*fsFile, error) {
|
---|
| 1111 | f, err := os.Open(filePath)
|
---|
| 1112 | if err != nil {
|
---|
| 1113 | return nil, err
|
---|
| 1114 | }
|
---|
| 1115 |
|
---|
| 1116 | fileInfo, err := f.Stat()
|
---|
| 1117 | if err != nil {
|
---|
| 1118 | f.Close()
|
---|
| 1119 | return nil, fmt.Errorf("cannot obtain info for file %q: %w", filePath, err)
|
---|
| 1120 | }
|
---|
| 1121 |
|
---|
| 1122 | if fileInfo.IsDir() {
|
---|
| 1123 | f.Close()
|
---|
| 1124 | return nil, errDirIndexRequired
|
---|
| 1125 | }
|
---|
| 1126 |
|
---|
| 1127 | if strings.HasSuffix(filePath, h.compressedFileSuffixes[fileEncoding]) ||
|
---|
| 1128 | fileInfo.Size() > fsMaxCompressibleFileSize ||
|
---|
| 1129 | !isFileCompressible(f, fsMinCompressRatio) {
|
---|
| 1130 | return h.newFSFile(f, fileInfo, false, "")
|
---|
| 1131 | }
|
---|
| 1132 |
|
---|
| 1133 | compressedFilePath := filePath + h.compressedFileSuffixes[fileEncoding]
|
---|
| 1134 | absPath, err := filepath.Abs(compressedFilePath)
|
---|
| 1135 | if err != nil {
|
---|
| 1136 | f.Close()
|
---|
| 1137 | return nil, fmt.Errorf("cannot determine absolute path for %q: %s", compressedFilePath, err)
|
---|
| 1138 | }
|
---|
| 1139 |
|
---|
| 1140 | flock := getFileLock(absPath)
|
---|
| 1141 | flock.Lock()
|
---|
| 1142 | ff, err := h.compressFileNolock(f, fileInfo, filePath, compressedFilePath, fileEncoding)
|
---|
| 1143 | flock.Unlock()
|
---|
| 1144 |
|
---|
| 1145 | return ff, err
|
---|
| 1146 | }
|
---|
| 1147 |
|
---|
| 1148 | func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePath, compressedFilePath string, fileEncoding string) (*fsFile, error) {
|
---|
| 1149 | // Attempt to open compressed file created by another concurrent
|
---|
| 1150 | // goroutine.
|
---|
| 1151 | // It is safe opening such a file, since the file creation
|
---|
| 1152 | // is guarded by file mutex - see getFileLock call.
|
---|
| 1153 | if _, err := os.Stat(compressedFilePath); err == nil {
|
---|
| 1154 | f.Close()
|
---|
| 1155 | return h.newCompressedFSFile(compressedFilePath, fileEncoding)
|
---|
| 1156 | }
|
---|
| 1157 |
|
---|
| 1158 | // Create temporary file, so concurrent goroutines don't use
|
---|
| 1159 | // it until it is created.
|
---|
| 1160 | tmpFilePath := compressedFilePath + ".tmp"
|
---|
| 1161 | zf, err := os.Create(tmpFilePath)
|
---|
| 1162 | if err != nil {
|
---|
| 1163 | f.Close()
|
---|
| 1164 | if !os.IsPermission(err) {
|
---|
| 1165 | return nil, fmt.Errorf("cannot create temporary file %q: %w", tmpFilePath, err)
|
---|
| 1166 | }
|
---|
| 1167 | return nil, errNoCreatePermission
|
---|
| 1168 | }
|
---|
| 1169 | if fileEncoding == "br" {
|
---|
| 1170 | zw := acquireStacklessBrotliWriter(zf, CompressDefaultCompression)
|
---|
| 1171 | _, err = copyZeroAlloc(zw, f)
|
---|
| 1172 | if err1 := zw.Flush(); err == nil {
|
---|
| 1173 | err = err1
|
---|
| 1174 | }
|
---|
| 1175 | releaseStacklessBrotliWriter(zw, CompressDefaultCompression)
|
---|
| 1176 | } else if fileEncoding == "gzip" {
|
---|
| 1177 | zw := acquireStacklessGzipWriter(zf, CompressDefaultCompression)
|
---|
| 1178 | _, err = copyZeroAlloc(zw, f)
|
---|
| 1179 | if err1 := zw.Flush(); err == nil {
|
---|
| 1180 | err = err1
|
---|
| 1181 | }
|
---|
| 1182 | releaseStacklessGzipWriter(zw, CompressDefaultCompression)
|
---|
| 1183 | }
|
---|
| 1184 | zf.Close()
|
---|
| 1185 | f.Close()
|
---|
| 1186 | if err != nil {
|
---|
| 1187 | return nil, fmt.Errorf("error when compressing file %q to %q: %w", filePath, tmpFilePath, err)
|
---|
| 1188 | }
|
---|
| 1189 | if err = os.Chtimes(tmpFilePath, time.Now(), fileInfo.ModTime()); err != nil {
|
---|
| 1190 | return nil, fmt.Errorf("cannot change modification time to %s for tmp file %q: %s",
|
---|
| 1191 | fileInfo.ModTime(), tmpFilePath, err)
|
---|
| 1192 | }
|
---|
| 1193 | if err = os.Rename(tmpFilePath, compressedFilePath); err != nil {
|
---|
| 1194 | return nil, fmt.Errorf("cannot move compressed file from %q to %q: %w", tmpFilePath, compressedFilePath, err)
|
---|
| 1195 | }
|
---|
| 1196 | return h.newCompressedFSFile(compressedFilePath, fileEncoding)
|
---|
| 1197 | }
|
---|
| 1198 |
|
---|
| 1199 | func (h *fsHandler) newCompressedFSFile(filePath string, fileEncoding string) (*fsFile, error) {
|
---|
| 1200 | f, err := os.Open(filePath)
|
---|
| 1201 | if err != nil {
|
---|
| 1202 | return nil, fmt.Errorf("cannot open compressed file %q: %w", filePath, err)
|
---|
| 1203 | }
|
---|
| 1204 | fileInfo, err := f.Stat()
|
---|
| 1205 | if err != nil {
|
---|
| 1206 | f.Close()
|
---|
| 1207 | return nil, fmt.Errorf("cannot obtain info for compressed file %q: %w", filePath, err)
|
---|
| 1208 | }
|
---|
| 1209 | return h.newFSFile(f, fileInfo, true, fileEncoding)
|
---|
| 1210 | }
|
---|
| 1211 |
|
---|
// openFSFile opens the file for filePath, or its pre-compressed variant
// (filePath plus the encoding suffix) when mustCompress is set. A missing
// or stale compressed variant is (re)created via compressAndOpenFSFile.
// Returns errDirIndexRequired when filePath is a directory.
func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
	filePathOriginal := filePath
	if mustCompress {
		// Look for the compressed variant, e.g. "index.html" -> "index.html.gz".
		filePath += h.compressedFileSuffixes[fileEncoding]
	}

	f, err := os.Open(filePath)
	if err != nil {
		if mustCompress && os.IsNotExist(err) {
			// No compressed variant yet - create it from the original.
			return h.compressAndOpenFSFile(filePathOriginal, fileEncoding)
		}
		return nil, err
	}

	fileInfo, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("cannot obtain info for file %q: %w", filePath, err)
	}

	if fileInfo.IsDir() {
		f.Close()
		if mustCompress {
			// A directory whose name ends with the compression suffix is
			// unexpected here, since directories are never compressed.
			return nil, fmt.Errorf("directory with unexpected suffix found: %q. Suffix: %q",
				filePath, h.compressedFileSuffixes[fileEncoding])
		}
		return nil, errDirIndexRequired
	}

	if mustCompress {
		fileInfoOriginal, err := os.Stat(filePathOriginal)
		if err != nil {
			f.Close()
			return nil, fmt.Errorf("cannot obtain info for original file %q: %w", filePathOriginal, err)
		}

		// Only re-create the compressed file if there was more than a second between the mod times.
		// On MacOS the gzip seems to truncate the nanoseconds in the mod time causing the original file
		// to look newer than the gzipped file.
		if fileInfoOriginal.ModTime().Sub(fileInfo.ModTime()) >= time.Second {
			// The compressed file became stale. Re-create it.
			// The Remove error is deliberately ignored: compressAndOpenFSFile
			// recreates the file via a temp file + rename anyway.
			f.Close()
			os.Remove(filePath)
			return h.compressAndOpenFSFile(filePathOriginal, fileEncoding)
		}
	}

	return h.newFSFile(f, fileInfo, mustCompress, fileEncoding)
}
|
---|
| 1261 |
|
---|
| 1262 | func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool, fileEncoding string) (*fsFile, error) {
|
---|
| 1263 | n := fileInfo.Size()
|
---|
| 1264 | contentLength := int(n)
|
---|
| 1265 | if n != int64(contentLength) {
|
---|
| 1266 | f.Close()
|
---|
| 1267 | return nil, fmt.Errorf("too big file: %d bytes", n)
|
---|
| 1268 | }
|
---|
| 1269 |
|
---|
| 1270 | // detect content-type
|
---|
| 1271 | ext := fileExtension(fileInfo.Name(), compressed, h.compressedFileSuffixes[fileEncoding])
|
---|
| 1272 | contentType := mime.TypeByExtension(ext)
|
---|
| 1273 | if len(contentType) == 0 {
|
---|
| 1274 | data, err := readFileHeader(f, compressed, fileEncoding)
|
---|
| 1275 | if err != nil {
|
---|
| 1276 | return nil, fmt.Errorf("cannot read header of the file %q: %w", f.Name(), err)
|
---|
| 1277 | }
|
---|
| 1278 | contentType = http.DetectContentType(data)
|
---|
| 1279 | }
|
---|
| 1280 |
|
---|
| 1281 | lastModified := fileInfo.ModTime()
|
---|
| 1282 | ff := &fsFile{
|
---|
| 1283 | h: h,
|
---|
| 1284 | f: f,
|
---|
| 1285 | contentType: contentType,
|
---|
| 1286 | contentLength: contentLength,
|
---|
| 1287 | compressed: compressed,
|
---|
| 1288 | lastModified: lastModified,
|
---|
| 1289 | lastModifiedStr: AppendHTTPDate(nil, lastModified),
|
---|
| 1290 |
|
---|
| 1291 | t: time.Now(),
|
---|
| 1292 | }
|
---|
| 1293 | return ff, nil
|
---|
| 1294 | }
|
---|
| 1295 |
|
---|
| 1296 | func readFileHeader(f *os.File, compressed bool, fileEncoding string) ([]byte, error) {
|
---|
| 1297 | r := io.Reader(f)
|
---|
| 1298 | var (
|
---|
| 1299 | br *brotli.Reader
|
---|
| 1300 | zr *gzip.Reader
|
---|
| 1301 | )
|
---|
| 1302 | if compressed {
|
---|
| 1303 | var err error
|
---|
| 1304 | if fileEncoding == "br" {
|
---|
| 1305 | if br, err = acquireBrotliReader(f); err != nil {
|
---|
| 1306 | return nil, err
|
---|
| 1307 | }
|
---|
| 1308 | r = br
|
---|
| 1309 | } else if fileEncoding == "gzip" {
|
---|
| 1310 | if zr, err = acquireGzipReader(f); err != nil {
|
---|
| 1311 | return nil, err
|
---|
| 1312 | }
|
---|
| 1313 | r = zr
|
---|
| 1314 | }
|
---|
| 1315 | }
|
---|
| 1316 |
|
---|
| 1317 | lr := &io.LimitedReader{
|
---|
| 1318 | R: r,
|
---|
| 1319 | N: 512,
|
---|
| 1320 | }
|
---|
| 1321 | data, err := ioutil.ReadAll(lr)
|
---|
| 1322 | if _, err := f.Seek(0, 0); err != nil {
|
---|
| 1323 | return nil, err
|
---|
| 1324 | }
|
---|
| 1325 |
|
---|
| 1326 | if br != nil {
|
---|
| 1327 | releaseBrotliReader(br)
|
---|
| 1328 | }
|
---|
| 1329 |
|
---|
| 1330 | if zr != nil {
|
---|
| 1331 | releaseGzipReader(zr)
|
---|
| 1332 | }
|
---|
| 1333 |
|
---|
| 1334 | return data, err
|
---|
| 1335 | }
|
---|
| 1336 |
|
---|
// stripLeadingSlashes drops up to stripSlashes leading path segments (each
// beginning with '/') from path and returns the remainder. The path must
// start with '/'; an empty slice is returned when fewer than stripSlashes
// segments are present.
func stripLeadingSlashes(path []byte, stripSlashes int) []byte {
	for ; stripSlashes > 0 && len(path) > 0; stripSlashes-- {
		if path[0] != '/' {
			panic("BUG: path must start with slash")
		}
		next := bytes.IndexByte(path[1:], '/')
		if next < 0 {
			// The last segment is being stripped - nothing remains.
			return path[:0]
		}
		path = path[next+1:]
	}
	return path
}
|
---|
| 1352 |
|
---|
// stripTrailingSlashes returns path with every trailing '/' removed.
func stripTrailingSlashes(path []byte) []byte {
	end := len(path)
	for end > 0 && path[end-1] == '/' {
		end--
	}
	return path[:end]
}
|
---|
| 1359 |
|
---|
// fileExtension returns the dot-prefixed extension of path, or "" when it
// has none. For compressed files the compressedFileSuffix is removed before
// the extension is taken, so "index.html.gz" yields ".html".
func fileExtension(path string, compressed bool, compressedFileSuffix string) string {
	if compressed {
		path = strings.TrimSuffix(path, compressedFileSuffix)
	}
	if dot := strings.LastIndexByte(path, '.'); dot >= 0 {
		return path[dot:]
	}
	return ""
}
|
---|
| 1370 |
|
---|
| 1371 | // FileLastModified returns last modified time for the file.
|
---|
| 1372 | func FileLastModified(path string) (time.Time, error) {
|
---|
| 1373 | f, err := os.Open(path)
|
---|
| 1374 | if err != nil {
|
---|
| 1375 | return zeroTime, err
|
---|
| 1376 | }
|
---|
| 1377 | fileInfo, err := f.Stat()
|
---|
| 1378 | f.Close()
|
---|
| 1379 | if err != nil {
|
---|
| 1380 | return zeroTime, err
|
---|
| 1381 | }
|
---|
| 1382 | return fsModTime(fileInfo.ModTime()), nil
|
---|
| 1383 | }
|
---|
| 1384 |
|
---|
| 1385 | func fsModTime(t time.Time) time.Time {
|
---|
| 1386 | return t.In(time.UTC).Truncate(time.Second)
|
---|
| 1387 | }
|
---|
| 1388 |
|
---|
// filesLockMap maps absolute file paths to *sync.Mutex values (see
// getFileLock) guarding creation of compressed file variants, so concurrent
// requests do not write the same compressed file simultaneously.
var filesLockMap sync.Map
|
---|
| 1390 |
|
---|
| 1391 | func getFileLock(absPath string) *sync.Mutex {
|
---|
| 1392 | v, _ := filesLockMap.LoadOrStore(absPath, &sync.Mutex{})
|
---|
| 1393 | filelock := v.(*sync.Mutex)
|
---|
| 1394 | return filelock
|
---|
| 1395 | }
|
---|