_config.yml

title: DeepSpeed
email: deepspeed@microsoft.com
description: >-
  DeepSpeed is a deep learning optimization library that makes distributed
  training easy, efficient, and effective.
locale: "en-US"
logo: /assets/images/deepspeed-logo-uppercase-bold-white-1.15.svg
repository: microsoft/DeepSpeed
baseurl: "/" # the subpath of your site, e.g. /blog
url: "https://www.deepspeed.ai" # the base hostname & protocol for your site, e.g. http://example.com

# Build settings
remote_theme: "mmistakes/minimal-mistakes@4.19.0"
minimal_mistakes_skin: "air"
search: true
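
# Jekyll plugins loaded at build time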
plugins:
  - jekyll-feed
  - jekyll-include-cache
  - jekyll-paginate

#paginate: 10
#paginate_path: /blog/page:num

include: ["_pages"]
exclude: ["code-docs"]
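
# Tutorials collection: each file is published at /tutorials/<name>/;
# the order list controls how the tutorial documents are sorted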
collections:
  tutorials:
    output: true
    permalink: /:collection/:path/
    order:
      - advanced-install.md
      - getting-started.md
      - azure.md
      - automatic-tensor-parallelism.md
      - bert-finetuning.md
      - bert-pretraining.md
      - cifar-10.md
      - curriculum-learning.md
      - data-efficiency.md
      - ds4sci_evoformerattention.md
      - flops-profiler.md
      - pytorch-profiler.md
      - autotuning.md
      - gan.md
      - lrrt.md
      - megatron.md
      - mixture-of-experts.md
      - mixture-of-experts-nlg.md
      - mixture-of-experts-inference.md
      - model-compression.md
      - monitor.md
      - comms-logging.md
      - one-cycle.md
      - onebit-adam.md
      - zero-one-adam.md
      - onebit-lamb.md
      - pipeline.md
      - progressive_layer_dropping.md
      - sparse-attention.md
      - transformer_kernel.md
      - zero-offload.md
      - zero.md
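
# Front-matter defaults, applied by scope: all pages, _pages, posts, tutorials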
defaults:
  - scope:
      path: ""
    values:
      layout: single
      author_profile: false
      read_time: false
      comments: false
      share: false
      related: false
      sneak_preview: false
      toc: true
      toc_label: "Contents"
      sidebar:
        nav: "lnav"
  - scope:
      path: "_pages"
    values:
      permalink: /docs/:basename/
      toc: true
      toc_label: "Contents"
  - scope:
      path: ""
      type: posts
    values:
      layout: single-full
      author_profile: false
      read_time: false
      comments: false
      share: true
      related: false
      toc: true
      toc_label: "Contents"
      toc_sticky: true
      show_date: true
  - scope:
      path: ""
      type: tutorials
    values:
      layout: single
      toc_sticky: true
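
# Site-wide settings: Google Analytics (gtag), timezone, breadcrumb navigation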
analytics:
  provider: "google-gtag"
  google:
    tracking_id: "UA-169781858-1"

timezone: America/Los_Angeles
breadcrumbs: true
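
# DeepSpeed release announcements on the Microsoft Research blog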
press_release_v3: https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/
press_release_v5: https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/
press_release_v6: https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/