upload.go

package server

import (
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"hash"
	"io"
	"log/slog"
	"math"
	"net/http"
	"net/url"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"golang.org/x/sync/errgroup"
)

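// blobUploadManager tracks in-flight blob uploads by digest so that concurrent
// pushes of the same blob share a single upload.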
var blobUploadManager sync.Map

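// blobUpload tracks the state of a multipart blob upload: the layer being
// pushed, its parts, overall progress, and the next upload URL handed back by
// the registry.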
type blobUpload struct {
	*Layer

	Total     int64
	Completed atomic.Int64

	Parts []blobUploadPart

	nextURL chan *url.URL

	context.CancelFunc

	file *os.File

	done bool
	err  error

	references atomic.Int32
}

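// Blobs are uploaded in parts: the part size targets Total/numUploadParts,
// clamped between minUploadPartSize and maxUploadPartSize.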
const (
	numUploadParts          = 64
	minUploadPartSize int64 = 100 * format.MegaByte
	maxUploadPartSize int64 = 1000 * format.MegaByte
)

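// Prepare starts the upload session: it requests a cross-repository mount when
// the layer has a From reference, records the upload location returned by the
// registry, and splits the blob into parts for Run to upload.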
func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		return err
	}

	if b.From != "" {
		values := requestURL.Query()
		values.Add("mount", b.Digest)
		values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
		requestURL.RawQuery = values.Encode()
	}

	resp, err := makeRequestWithRetry(ctx, http.MethodPost, requestURL, nil, nil, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	fi, err := os.Stat(p)
	if err != nil {
		return err
	}

	b.Total = fi.Size()

	// http.StatusCreated indicates a blob has been mounted
	// ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
	if resp.StatusCode == http.StatusCreated {
		b.Completed.Store(b.Total)
		b.done = true
		return nil
	}

	size := b.Total / numUploadParts
	switch {
	case size < minUploadPartSize:
		size = minUploadPartSize
	case size > maxUploadPartSize:
		size = maxUploadPartSize
	}

	var offset int64
	for offset < fi.Size() {
		if offset+size > fi.Size() {
			size = fi.Size() - offset
		}

		// set part.N to the current number of parts
		b.Parts = append(b.Parts, blobUploadPart{N: len(b.Parts), Offset: offset, Size: size})
		offset += size
	}

	slog.Info(fmt.Sprintf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))

	requestURL, err = url.Parse(location)
	if err != nil {
		return err
	}

	b.nextURL = make(chan *url.URL, 1)
	b.nextURL <- requestURL

	return nil
}

// Run uploads blob parts to the upstream. If the upstream supports redirection, parts will be uploaded
// in parallel as defined by Prepare. Otherwise, parts will be uploaded serially. Run sets b.err on error.
func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
	defer blobUploadManager.Delete(b.Digest)
	ctx, b.CancelFunc = context.WithCancel(ctx)

	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		b.err = err
		return
	}

	b.file, err = os.Open(p)
	if err != nil {
		b.err = err
		return
	}
	defer b.file.Close()

	g, inner := errgroup.WithContext(ctx)
	g.SetLimit(numUploadParts)
	for i := range b.Parts {
		part := &b.Parts[i]
		select {
		case <-inner.Done():
		case requestURL := <-b.nextURL:
			g.Go(func() error {
				var err error
				for try := range maxRetries {
					err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
					switch {
					case errors.Is(err, context.Canceled):
						return err
					case errors.Is(err, errMaxRetriesExceeded):
						return err
					case err != nil:
						sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
						slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
						time.Sleep(sleep)
						continue
					}

					return nil
				}

				return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
			})
		}
	}

	if err := g.Wait(); err != nil {
		b.err = err
		return
	}

	requestURL := <-b.nextURL

	// calculate md5 checksum and add it to the commit request
	md5sum := md5.New()
	for _, part := range b.Parts {
		md5sum.Write(part.Sum(nil))
	}

	values := requestURL.Query()
	values.Add("digest", b.Digest)
	values.Add("etag", fmt.Sprintf("%x-%d", md5sum.Sum(nil), len(b.Parts)))
	requestURL.RawQuery = values.Encode()

	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", "0")

	for try := range maxRetries {
		var resp *http.Response
		resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
		if errors.Is(err, context.Canceled) {
			break
		} else if err != nil {
			sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
			slog.Info(fmt.Sprintf("%s complete upload attempt %d failed: %v, retrying in %s", b.Digest[7:19], try, err, sleep))
			time.Sleep(sleep)
			continue
		}
		defer resp.Body.Close()
		break
	}

	b.err = err
	b.done = true
}

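// uploadPart uploads a single part of the blob. On a temporary redirect it
// hands the session upload URL back via b.nextURL and retries the part against
// the redirect URL with PUT; on 401 it refreshes the authorization token before
// returning an error so the caller can retry.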
func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *url.URL, part *blobUploadPart, opts *registryOptions) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", fmt.Sprintf("%d", part.Size))

	if method == http.MethodPatch {
		headers.Set("X-Redirect-Uploads", "1")
		headers.Set("Content-Range", fmt.Sprintf("%d-%d", part.Offset, part.Offset+part.Size-1))
	}

	sr := io.NewSectionReader(b.file, part.Offset, part.Size)

	md5sum := md5.New()
	w := &progressWriter{blobUpload: b}

	resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sr, io.MultiWriter(w, md5sum)), opts)
	if err != nil {
		w.Rollback()
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	nextURL, err := url.Parse(location)
	if err != nil {
		w.Rollback()
		return err
	}

	switch {
	case resp.StatusCode == http.StatusTemporaryRedirect:
		w.Rollback()
		b.nextURL <- nextURL

		redirectURL, err := resp.Location()
		if err != nil {
			return err
		}

		// retry uploading to the redirect URL
		for try := range maxRetries {
			err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, nil)
			switch {
			case errors.Is(err, context.Canceled):
				return err
			case errors.Is(err, errMaxRetriesExceeded):
				return err
			case err != nil:
				sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
				slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
				time.Sleep(sleep)
				continue
			}

			return nil
		}

		return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
	case resp.StatusCode == http.StatusUnauthorized:
		w.Rollback()
		challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate"))
		token, err := getAuthorizationToken(ctx, challenge)
		if err != nil {
			return err
		}

		opts.Token = token
		fallthrough
	case resp.StatusCode >= http.StatusBadRequest:
		w.Rollback()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}

		return fmt.Errorf("http status %s: %s", resp.Status, body)
	}

	if method == http.MethodPatch {
		b.nextURL <- nextURL
	}

	part.Hash = md5sum
	return nil
}

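// acquire and release reference-count callers waiting on the upload; releasing
// the last reference cancels it.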
func (b *blobUpload) acquire() {
	b.references.Add(1)
}

func (b *blobUpload) release() {
	if b.references.Add(-1) == 0 {
		b.CancelFunc()
	}
}

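// Wait blocks until the upload completes or ctx is canceled, periodically
// reporting progress through fn.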
func (b *blobUpload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
	b.acquire()
	defer b.release()

	ticker := time.NewTicker(60 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return ctx.Err()
		}

		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", b.Digest[7:19]),
			Digest:    b.Digest,
			Total:     b.Total,
			Completed: b.Completed.Load(),
		})

		if b.done || b.err != nil {
			return b.err
		}
	}
}

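// blobUploadPart tracks the offset, size, and running MD5 checksum of a single part.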
type blobUploadPart struct {
	// N is the part number
	N      int
	Offset int64
	Size   int64
	hash.Hash
}

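// progressWriter counts bytes as they are sent so overall progress can be
// reported; Rollback undoes the count when a part fails and must be retried.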
type progressWriter struct {
	written int64
	*blobUpload
}

func (p *progressWriter) Write(b []byte) (n int, err error) {
	n = len(b)
	p.written += int64(n)
	p.Completed.Add(int64(n))
	return n, nil
}

func (p *progressWriter) Rollback() {
	p.Completed.Add(-p.written)
	p.written = 0
}

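// uploadBlob pushes a layer to the registry. If the blob already exists
// upstream the push is reported as complete immediately; otherwise concurrent
// pushes of the same digest share one upload tracked in blobUploadManager.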
func uploadBlob(ctx context.Context, mp ModelPath, layer *Layer, opts *registryOptions, fn func(api.ProgressResponse)) error {
	requestURL := mp.BaseURL()
	requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs", layer.Digest)

	resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
	switch {
	case errors.Is(err, os.ErrNotExist):
	case err != nil:
		return err
	default:
		defer resp.Body.Close()
		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", layer.Digest[7:19]),
			Digest:    layer.Digest,
			Total:     layer.Size,
			Completed: layer.Size,
		})

		return nil
	}

	data, ok := blobUploadManager.LoadOrStore(layer.Digest, &blobUpload{Layer: layer})
	upload := data.(*blobUpload)
	if !ok {
		requestURL := mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs/uploads/")
		if err := upload.Prepare(ctx, requestURL, opts); err != nil {
			blobUploadManager.Delete(layer.Digest)
			return err
		}

		//nolint:contextcheck
		go upload.Run(context.Background(), opts)
	}

	return upload.Wait(ctx, fn)
}