
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<!-- Meta tags for social media banners, these should be filled in appropriately as they are your "business card" -->
<!-- Replace the content tag with appropriate information -->
<meta name="description" content="DESCRIPTION META TAG">
<meta property="og:title" content="SOCIAL MEDIA TITLE TAG" />
<meta property="og:description" content="SOCIAL MEDIA DESCRIPTION TAG" />
<meta property="og:url" content="URL OF THE WEBSITE" />
<!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x630 -->
<meta property="og:image" content="static/images/your_banner_image.png" />
<meta property="og:image:width" content="1200" />
<meta property="og:image:height" content="630" />
<meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
<meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
<!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x600 -->
<meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
<meta name="twitter:card" content="summary_large_image">
<!-- Keywords for your paper to be indexed by -->
<meta name="keywords" content="KEYWORDS SHOULD BE PLACED HERE">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>OmniParser</title>
<!-- <link rel="icon" type="image/x-icon" href="static/images/mammoth_icon.png"> -->
<link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">
<link rel="stylesheet" href="static/css/bulma.min.css">
<link rel="stylesheet" href="static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="static/css/bulma-slider.min.css">
<link rel="stylesheet" href="static/css/fontawesome.all.min.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
<link rel="stylesheet" href="static/css/index.css">
<script src="static/js/jquery.min.js"></script>
<script src="static/js/main.js"></script>
<script defer src="static/js/fontawesome.all.min.js"></script>
<script src="static/js/bulma-carousel.min.js"></script>
<script src="static/js/bulma-slider.min.js"></script>
<script src="static/js/index.js"></script>
<link rel="stylesheet" type="text/css" href="static/css/jquery.dataTables.css">
<script type="text/javascript" charset="utf8" src="static/js/jquery-3.5.1.js"></script>
<script type="text/javascript" charset="utf8" src="static/js/jquery.dataTables.js"></script>
</head>
<body>
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h1 class="title is-1 publication-title">OmniParser for Pure Vision Based GUI Agent</h1>
<div class="is-size-5 publication-authors">
<!-- Paper authors -->
<span class="author-block">
<a href="https://adamlu123.github.io/" target="_blank">Yadong Lu</a><sup>1</sup>,
</span>
<span class="author-block">
<a href="https://jwyang.github.io/" target="_blank">Jianwei Yang</a><sup>1</sup>,
</span>
<span class="author-block">
<a href="https://scholar.google.com/citations?user=S6OFEFEAAAAJ&hl=en" target="_blank">Yelong Shen</a><sup>2</sup>,
</span>
<span class="author-block">
<a href="https://www.microsoft.com/en-us/research/people/hassanam/?from=https://research.microsoft.com/en-us/um/people/hassanam/&type=exact" target="_blank">Ahmed Awadallah</a><sup>1</sup>
</span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block">
<sup>1</sup>Microsoft Research,
<sup>2</sup>Microsoft Gen AI
</span><br>
</div>
  69. <div class="column has-text-centered">
  70. <div class="publication-links">
  71. <!-- Github link -->
  72. <span class="link-block">
  73. <a href="https://github.com/microsoft/OmniParser" target="_blank"
  74. class="external-link button is-normal is-rounded is-dark">
  75. <span class="icon">
  76. <i class="fab fa-github"></i>
  77. </span>
  78. <span>Code</span>
  79. </a>
  80. </span>
  81. <!-- ArXiv abstract Link -->
  82. <span class="link-block">
  83. <a href="https://arxiv.org/pdf/2408.00203" target="_blank"
  84. class="external-link button is-normal is-rounded is-dark">
  85. <span class="icon">
  86. <i class="ai ai-arxiv"></i>
  87. </span>
  88. <span>arXiv</span>
  89. </a>
  90. </span>
  91. <span class="link-block">
  92. <a href="https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/"
  93. class="external-link button is-normal is-rounded is-dark">
  94. <span class="icon">
  95. <i class="fas fa-book"></i>
  96. </span>
  97. <span>Blog post</span>
  98. </a>
  99. </span>
  100. <!-- Demo link -->
  101. <!-- <span class="link-block"> -->
  102. <!-- <a href="https://46ea00cff2fc968db7.gradio.live" target="_blank" -->
  103. <!-- class="external-link button is-normal is-rounded is-dark"> -->
  104. <!-- <span class="icon"> -->
  105. <!-- <i class="fab fa-github"></i> -->
  106. <!-- </span> -->
  107. <!-- <span>Demo</span>
  108. </a>
  109. </span>
  110. -->
  111. </div>
  112. </div>
  113. </div>
  114. </div>
  115. </div>
  116. </div>
  117. </section>
<!-- Paper abstract -->
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h2 class="title is-3">Abstract</h2>
<div class="content has-text-justified">
<p>
The recent success of large vision language models shows great potential in driving agent systems that operate on user interfaces. However, we argue that the power of multimodal models like GPT-4V as a general agent on multiple operating systems across different applications is largely underestimated due to the lack of a robust screen parsing technique capable of: 1. reliably identifying interactable icons within the user interface, and 2. understanding the semantics of various elements in a screenshot and accurately associating the intended action with the corresponding region on the screen. To fill these gaps, we introduce OMNIPARSER, a comprehensive method for parsing user interface screenshots into structured elements, which significantly enhances the ability of GPT-4V to generate actions that can be accurately grounded in the corresponding regions of the interface. We first curated an interactable icon detection dataset using popular webpages and an icon description dataset. These datasets were utilized to fine-tune specialized models: a detection model to parse interactable regions on the screen and a caption model to extract the functional semantics of the detected elements. OMNIPARSER significantly improves GPT-4V's performance on the ScreenSpot benchmark. On the Mind2Web and AITW benchmarks, OMNIPARSER with screenshot-only input outperforms the GPT-4V baselines that require additional information beyond the screenshot.
</p>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- End paper abstract -->
<!-- Image carousel -->
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<!-- <div class="column is-four-fifths"> -->
<div class="item">
<!-- Your image here -->
<img src="static/images/flow_merged0.png" alt="Parsed screenshot example" />
<img src="static/images/flow_merged1.png" alt="Parsed screenshot example" />
<img src="static/images/flow_merged2.png" alt="Parsed screenshot example" />
<h2 class="subtitle">
Examples of parsed screenshot images and local semantics produced by OmniParser. The inputs to OmniParser are the user task and a UI screenshot, from which it produces: 1. a parsed screenshot image with bounding boxes and numeric IDs overlaid, and 2. local semantics containing both the extracted text and the icon descriptions.
</h2>
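<p>
For illustration only, a minimal sketch of what the structured "local semantics" for a parsed screenshot could look like. The field names and values below are assumptions for exposition, not the exact schema produced by OmniParser:
</p>
<pre><code>// Hypothetical structured output for one parsed screenshot (illustrative schema only).
// Each element pairs a numeric ID and bounding box with either extracted text or an icon description.
const parsedElements = [
  { id: 0, type: "text", bbox: [0.12, 0.05, 0.34, 0.08], content: "Search flights" },
  { id: 1, type: "icon", bbox: [0.88, 0.04, 0.93, 0.09], content: "settings gear icon" },
];
</code></pre>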
</div>
<!-- </div> -->
</div>
</div>
</div>
</section>
<!-- End image carousel -->
  174. <section class="hero is-small">
  175. <div class="hero-body">
  176. <div class="container is-max-desktop">
  177. <div class="columns is-centered has-text-centered">
  178. <div class="column is-four-fifths">
  179. <h2 class="title is-3">Curated Dataset for Interactable Region Detection and Icon Functionality Description</h2>
  180. <br>
  181. We curate a dataset of interactable icon detection dataset, containing 67k unique screenshot images, each labeled with bounding boxes of interactable icons derived from DOM tree. We first took a 100k uniform sample of popular publicly availabe urls on the clueweb dataset, and collect bounding boxes of interactable regions of the webpage from the DOM tree of each urls. We also collected 7k icon-description
  182. pairs for finetuning the caption model.
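<p>
For illustration, a minimal sketch (run in a browser console on a rendered page) of how interactable-region bounding boxes could be collected from a DOM tree. The selector list and visibility filter below are assumptions for exposition, not the exact heuristics used to build the OmniParser dataset:
</p>
<pre><code>// Hypothetical sketch: collect bounding boxes of interactable DOM elements on a rendered page.
// The selector list is an assumption, not the exact rule set used for the dataset.
const INTERACTABLE_SELECTOR = 'a, button, input, select, textarea, [role="button"], [onclick]';

function collectInteractableBoxes() {
  const boxes = [];
  document.querySelectorAll(INTERACTABLE_SELECTOR).forEach((el) => {
    const r = el.getBoundingClientRect();
    // Keep only elements that are actually rendered with a non-zero area.
    if (r.width > 0 && r.height > 0) {
      boxes.push({ tag: el.tagName.toLowerCase(), x: r.x, y: r.y, w: r.width, h: r.height });
    }
  });
  return boxes;
}

console.log(JSON.stringify(collectInteractableBoxes(), null, 2));
</code></pre>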
  183. <div class="item">
  184. <img src="static/images/curated_data.png" alt="Species Classification results on iWildCam2020-WILDS (OOD) dataset" />
  185. <p>
  186. <b>Examples from the Interactable Region Detection dataset. </b>. TThe bounding boxes are based on the interactable region extracted from the DOM tree of the webpage.
  187. </p>
  188. </div>
  189. </div>
  190. </div>
  191. </div>
  192. </div>
  193. </section>
<!--
<section class="hero is-light is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item item-steve">
<video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
<source src="./static/videos/web_5_demo_nocap.mp4"
type="video/mp4">
</video>
</div>
<div class="item item-chair-tp">
<video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
<source src="./static/videos/web_22_demo_nocap.mp4"
type="video/mp4">
</video>
</div>
</div>
</div>
</div>
</section> -->
  215. <section class="hero is-small">
  216. <div class="hero-body">
  217. <div class="container is-max-desktop">
  218. <div class="columns is-centered has-text-centered">
  219. <div class="column is-four-fifths">
  220. <h2 class="title is-3">Results</h2>
  221. <br>
  222. We evaluate our model on SeeClick, Mind2Web, and AITW benchmarks. We show that our model outperforms the GPT-4V baseline on all benchmarks. We also show that our model with screenshot only input outperforms the GPT-4V baselines requiring additional information outside of screenshot.
  223. <div class="item">
  224. <img src="static/images/seeclick.png" alt="seeclick" />
  225. <img src="static/images/m2w.png" alt="mind2web" />
  226. <img src="static/images/aitw.png" alt="aitw" />
  227. </div>
  228. </div>
  229. </div>
  230. </div>
  231. </div>
  232. </section>
  233. <section class="hero is-small"></section>
  234. <div class="hero-body">
  235. <div class="container is-max-desktop">
  236. <div class="columns is-centered has-text-centered">
  237. <div class="column is-four-fifths">
  238. <h2 class="title is-3">Plugin-ready for Other Vision Language Models</h2>
  239. <br>
  240. To further demonstrate OmniParser is a plugin choice for off-the-shelf vision langauge models, we show the performance of OmniParser combined with recently announced vision language models: Phi-3.5-V and Llama-3.2-V. As seen in table, our finetuned interactable region detection (ID) model significantly improves the task performance compared to grounding dino model (w.o. ID) with local semantics across all subcategories for GPT-4V, Phi-3.5-V and Llama-3.2-V. In addition, the local semantics of icon functionality helps significantly with the performance for every vision language model.
  241. In the table, LS is short for local semantics of icon functionality, ID is short for the interactable region detection model we finetune. The setting w.o. ID means we replace the ID model with original Grounding DINO model not finetuned on our data, and with local semantics. The setting w.o. ID and w.o LS means we use Grounding DINO model, and further without using the icon description in the text prompt.
  242. <div class="item">
  243. <img src="static/images/ablation.png" alt="seeclick" />
  244. </div>
  245. </div>
  246. </div>
  247. </div>
  248. </div>
  249. </section>
  250. <section class="section">
  251. <div class="container is-max-desktop">
  252. <h2 class="title is-3">Demo of Mind2Web Tasks </h2>
  253. <div class="columns is-centered">
  254. <!-- Visual Effects. -->
  255. <div class="column">
  256. <div class="content">
  257. <!-- <h2 class="title is-3">Visual Effects</h2> -->
  258. <!-- <p>
  259. Using <i>nerfies</i> you can create fun visual effects. This Dolly zoom effect
  260. would be impossible without nerfies since it would require going through a wall.
  261. </p> -->
  262. <video id="dollyzoom" autoplay controls muted loop playsinline height="100%">
  263. <source src="./static/videos/web_5_demo_nocap.mp4"
  264. type="video/mp4">
  265. </video>
  266. </div>
  267. </div>
  268. <!--/ Visual Effects. -->
  269. <!-- Matting. -->
  270. <div class="column">
  271. <!-- <h2 class="title is-3">Matting</h2> -->
  272. <div class="columns is-centered">
  273. <div class="column content">
  274. <!-- <p>
  275. As a byproduct of our method, we can also solve the matting problem by ignoring
  276. samples that fall outside of a bounding box during rendering.
  277. </p> -->
  278. <video id="matting-video" controls playsinline height="100%">
  279. <source src="./static/videos/web_22_demo_nocap.mp4"
  280. type="video/mp4">
  281. </video>
  282. </div>
  283. </div>
  284. </div>
  285. </div>
  286. </section>
<br>
<!-- BibTex citation -->
<section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<h2 class="title">Citation</h2>
<!-- Please cite our paper if you use our code, data, model or results: -->
<!-- <br> -->
<pre><code>@misc{lu2024omniparserpurevisionbased,
  title={OmniParser for Pure Vision Based GUI Agent},
  author={Yadong Lu and Jianwei Yang and Yelong Shen and Ahmed Awadallah},
  year={2024},
  eprint={2408.00203},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2408.00203},
}
</code></pre>
</div>
</section>
  306. <footer class="footer">
  307. <div class="container">
  308. <div class="columns is-centered">
  309. <div class="column is-8">
  310. <div class="content">
  311. <p>
  312. This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a> which was adopted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page. You are free to borrow the of this website, we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"
  313. target="_blank">Creative
  314. Commons Attribution-ShareAlike 4.0 International License</a>.
  315. </p>
  316. </div>
  317. </div>
  318. </div>
  319. </div>
  320. </footer>
  321. </body>
<style>
  .buttonGroup {
    text-align: center;
  }

  .buttonGroup > button {
    padding: 15px;
    color: white;
    background-color: #363636;
    border-radius: 5px;
  }

  .buttonGroup > button:hover {
    /* The original rule `box-shadow: 5px;` is invalid CSS; a small hover shadow is assumed here. */
    box-shadow: 0 0 5px rgba(0, 0, 0, 0.5);
  }
</style>
</body>
</html>