_config.yml

title: DeepSpeed
email: deepspeed@microsoft.com
description: >-
  DeepSpeed is a deep learning optimization library that makes distributed
  training easy, efficient, and effective.
locale: "en-US"
logo: /assets/images/deepspeed-logo-uppercase-bold-white-1.15.svg
repository: microsoft/DeepSpeed
baseurl: "/" # the subpath of your site, e.g. /blog
url: "https://www.deepspeed.ai" # the base hostname & protocol for your site, e.g. http://example.com

# Build settings
remote_theme: "mmistakes/minimal-mistakes@4.19.0"
minimal_mistakes_skin: "air"
search: true
plugins:
  - jekyll-feed
  - jekyll-include-cache
  - jekyll-paginate
#paginate: 10
#paginate_path: /blog/page:num
include: ["_pages"]
exclude: ["code-docs"]

collections:
  tutorials:
    output: true
    permalink: /:collection/:path/
    order:
      - advanced-install.md
      - getting-started.md
      - azure.md
      - automatic-tensor-parallelism.md
      - bert-finetuning.md
      - bert-pretraining.md
      - cifar-10.md
      - curriculum-learning.md
      - data-efficiency.md
      - flops-profiler.md
      - pytorch-profiler.md
      - autotuning.md
      - gan.md
      - lrrt.md
      - megatron.md
      - mixture-of-experts.md
      - mixture-of-experts-nlg.md
      - mixture-of-experts-inference.md
      - model-compression.md
      - monitor.md
      - comms-logging.md
      - one-cycle.md
      - onebit-adam.md
      - zero-one-adam.md
      - onebit-lamb.md
      - pipeline.md
      - progressive_layer_dropping.md
      - sparse-attention.md
      - transformer_kernel.md
      - zero-offload.md
      - zero.md
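
# Illustration (not part of the original config): with the permalink pattern
# /:collection/:path/ above, a tutorial such as _tutorials/getting-started.md
# is published at /tutorials/getting-started/ and only needs minimal front
# matter of its own, e.g. (title value is hypothetical):
#   ---
#   title: "Getting Started"
#   ---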

defaults:
  - scope:
      path: ""
    values:
      layout: single
      author_profile: false
      read_time: false
      comments: false
      share: false
      related: false
      sneak_preview: false
      toc: true
      toc_label: "Contents"
      sidebar:
        nav: "lnav"
  - scope:
      path: "_pages"
    values:
      permalink: /docs/:basename/
      toc: true
      toc_label: "Contents"
  - scope:
      path: ""
      type: posts
    values:
      layout: single-full
      author_profile: false
      read_time: false
      comments: false
      share: true
      related: false
      toc: true
      toc_label: "Contents"
      toc_sticky: true
      show_date: true
  - scope:
      path: ""
      type: tutorials
    values:
      layout: single
      toc_sticky: true
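
# Note (illustration, not part of the original config): these are standard
# Jekyll front matter defaults, so any individual page can override them
# locally; e.g. a page that should not render a table of contents can set:
#   ---
#   toc: false
#   ---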

analytics:
  provider: "google-gtag"
  google:
    tracking_id: "UA-169781858-1"

timezone: America/Los_Angeles
breadcrumbs: true

press_release_v3: https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/
press_release_v5: https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/
press_release_v6: https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/
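
# Note (illustration, not part of the original config): custom top-level keys
# such as press_release_v3 are exposed to Liquid as site variables, so a
# layout or page can reference them as, e.g., {{ site.press_release_v3 }}.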