- <!DOCTYPE html>
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Meta tags for social media banners; these should be filled in appropriately, as they are your "business card" -->
- <!-- Replace the content tag with appropriate information -->
- <meta name="description" content="DESCRIPTION META TAG">
- <meta property="og:title" content="SOCIAL MEDIA TITLE TAG" />
- <meta property="og:description" content="SOCIAL MEDIA DESCRIPTION TAG TAG" />
- <meta property="og:url" content="URL OF THE WEBSITE" />
- <!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x630 -->
- <meta property="og:image" content="static/image/your_banner_image.png" />
- <meta property="og:image:width" content="1200" />
- <meta property="og:image:height" content="630" />
- <meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
- <meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
- <!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x600 -->
- <meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
- <meta name="twitter:card" content="summary_large_image">
- <!-- Keywords for your paper to be indexed by-->
- <meta name="keywords" content="KEYWORDS SHOULD BE PLACED HERE">
- <meta name="viewport" content="width=device-width, initial-scale=1">
- <title>OmniParser</title>
- <!-- <link rel="icon" type="image/x-icon" href="static/images/mammoth_icon.png"> -->
- <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">
- <link rel="stylesheet" href="static/css/bulma.min.css">
- <link rel="stylesheet" href="static/css/bulma-carousel.min.css">
- <link rel="stylesheet" href="static/css/bulma-slider.min.css">
- <link rel="stylesheet" href="static/css/fontawesome.all.min.css">
- <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
- <link rel="stylesheet" href="static/css/index.css">
- <script src="static/js/jquery.min.js"></script>
- <script src="static/js/main.js"></script>
- <script defer src="static/js/fontawesome.all.min.js"></script>
- <script src="static/js/bulma-carousel.min.js"></script>
- <script src="static/js/bulma-slider.min.js"></script>
- <script src="static/js/index.js"></script>
- <link rel="stylesheet" type="text/css" href="static/css/jquery.dataTables.css">
- <script type="text/javascript" charset="utf8" src="static/js/jquery-3.5.1.js"></script>
- <script type="text/javascript" charset="utf8" src="static/js/jquery.dataTables.js"></script>
- </head>
- <body>
- <section class="hero">
- <div class="hero-body">
- <div class="container is-max-desktop">
- <div class="columns is-centered">
- <div class="column has-text-centered">
- <h1 class="title is-1 publication-title">OmniParser for Pure Vision Based GUI Agent</h1>
- <div class="is-size-5 publication-authors">
- <!-- Paper authors -->
- <span class="author-block">
- <a href="https://adamlu123.github.io/" target="_blank">Yadong Lu</a><sup>1</sup>,
- </span>
- <span class="author-block">
- <a href="https://jwyang.github.io/" target="_blank">Jianwei Yang</a><sup>1</sup>,
- </span>
- <span class="author-block">
- <a href="https://scholar.google.com/citations?user=S6OFEFEAAAAJ&hl=en" target="_blank">Yelong Shen</a><sup>2</sup>,
- </span>
- <span class="author-block">
- <a href="https://www.microsoft.com/en-us/research/people/hassanam/?from=https://research.microsoft.com/en-us/um/people/hassanam/&type=exact" target="_blank">Ahmed Awadallah</a><sup>1</sup>,
- </span>
- </div>
- <div class="is-size-5 publication-authors">
- <span class="author-block">
- <sup>1</sup>Microsoft Research,
- <sup>2</sup>Microsoft Gen AI</span><br>
- </div>
- <div class="column has-text-centered">
- <div class="publication-links">
- <!-- Github link -->
- <span class="link-block">
- <a href="https://github.com/microsoft/OmniParser" target="_blank"
- class="external-link button is-normal is-rounded is-dark">
- <span class="icon">
- <i class="fab fa-github"></i>
- </span>
- <span>Code</span>
- </a>
- </span>
- <!-- ArXiv abstract Link -->
- <span class="link-block">
- <a href="https://arxiv.org/pdf/2408.00203" target="_blank"
- class="external-link button is-normal is-rounded is-dark">
- <span class="icon">
- <i class="ai ai-arxiv"></i>
- </span>
- <span>arXiv</span>
- </a>
- </span>
- <span class="link-block">
- <a href="https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/"
- class="external-link button is-normal is-rounded is-dark">
- <span class="icon">
- <i class="fas fa-book"></i>
- </span>
- <span>Blog post</span>
- </a>
- </span>
- <!-- Demo link -->
- <!-- <span class="link-block"> -->
- <!-- <a href="https://46ea00cff2fc968db7.gradio.live" target="_blank" -->
- <!-- class="external-link button is-normal is-rounded is-dark"> -->
- <!-- <span class="icon"> -->
- <!-- <i class="fab fa-github"></i> -->
- <!-- </span> -->
- <!-- <span>Demo</span>
- </a>
- </span>
- -->
- </div>
- </div>
- </div>
- </div>
- </div>
- </div>
- </section>
- <!-- Paper abstract -->
- <section class="hero">
- <div class="hero-body">
- <div class="container is-max-desktop">
- <div class="columns is-centered">
- <div class="column has-text-centered">
- <h2 class="title is-3">Abstract</h2>
- <div class="content has-text-justified">
- <p>
- The recent success of large vision-language models shows great potential for driving agent systems that operate on user interfaces. However, we argue that the power of
- multimodal models like GPT-4V as a general agent on multiple operating systems across different applications is largely underestimated due to the lack of
- a robust screen parsing technique capable of: 1. reliably identifying interactable icons within the user interface, and 2. understanding the semantics of various
- elements in a screenshot and accurately associating the intended action with the corresponding region on the screen. To fill these gaps, we introduce OMNIPARSER,
- a comprehensive method for parsing user interface screenshots into structured elements, which significantly enhances the ability of GPT-4V to generate actions
- that can be accurately grounded in the corresponding regions of the interface. We first curated an interactable icon detection dataset from popular webpages, as well as
- an icon description dataset. These datasets were used to fine-tune two specialized models: a detection model that parses interactable regions on the screen and a caption
- model that extracts the functional semantics of the detected elements. OMNIPARSER significantly improves GPT-4V's performance on the ScreenSpot benchmark. Moreover,
- on the Mind2Web and AITW benchmarks, OMNIPARSER with screenshot-only input outperforms GPT-4V baselines that require additional information beyond the screenshot.
- </p>
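- <p>
- To make the pipeline above concrete, here is a minimal sketch of how parsing and prompting could fit together. This is an illustration only, not the released OmniParser code: the detection, OCR, and caption models are passed in as callables, and all names here are placeholders.
- </p>
- <pre><code># Minimal sketch of an OmniParser-style parse-then-prompt loop (illustrative only; names are placeholders).
- from dataclasses import dataclass
- from typing import Callable
- from PIL import Image
-
- @dataclass
- class Element:
-     elem_id: int
-     bbox: tuple[int, int, int, int]  # (x1, y1, x2, y2) in pixels
-     text: str                        # OCR'd text; empty for pure icons
-     description: str                 # functional semantics from the caption model
-
- def parse_screenshot(
-     screenshot: Image.Image,
-     detect: Callable[[Image.Image], list[tuple[int, int, int, int]]],  # interactable-region detector
-     ocr: Callable[[Image.Image], str],
-     caption: Callable[[Image.Image], str],
- ) -> list[Element]:
-     """Parse a UI screenshot into an ID-indexed list of structured elements."""
-     elements = []
-     for i, box in enumerate(detect(screenshot)):
-         crop = screenshot.crop(box)
-         elements.append(Element(i, box, ocr(crop), caption(crop)))
-     return elements
-
- def format_prompt(task: str, elements: list[Element]) -> str:
-     """Render the parsed elements as the 'local semantics' text given to the VLM."""
-     lines = [f"Task: {task}"]
-     for e in elements:
-         lines.append(f"[{e.elem_id}] text={e.text!r} description={e.description!r}")
-     return "\n".join(lines)
- </code></pre>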
- </div>
- </div>
- </div>
- </div>
- </div>
- </section>
- <!-- End paper abstract -->
- <!-- Image carousel -->
- <section class="hero">
- <div class="hero-body">
- <div class="container is-max-desktop">
- <div class="columns is-centered">
- <!-- <div class="column is-four-fifths"> -->
- <div class="item">
- <!-- Your image here -->
- <img src="static/images/flow_merged0.png" alt="Result mobile" />
- <img src="static/images/flow_merged1.png" alt="Result mobile" />
- <img src="static/images/flow_merged2.png" alt="Result mobile" />
- <h2 class="subtitle">
- Examples of parsed screenshots and local semantics produced by OmniParser. The inputs to OmniParser are a user task and a UI screenshot, from which it produces: 1. a parsed screenshot image with bounding boxes and numeric IDs overlaid, and 2. local semantics containing both the extracted text and the icon descriptions.
- </h2>
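- <p>
- For concreteness, the local semantics for a parsed screenshot can be pictured as an ID-indexed list like the one below. The schema is illustrative only (an assumption for this page), not the released output format; the IDs match the numeric labels overlaid on the parsed screenshot.
- </p>
- <pre><code># Hypothetical example of parsed local semantics (schema is illustrative only).
- parsed_elements = [
-     {"id": 0, "bbox": [12, 40, 196, 72],  "text": "Search flights", "description": "search input field"},
-     {"id": 1, "bbox": [204, 40, 236, 72], "text": "",               "description": "magnifier icon that submits the search"},
-     {"id": 2, "bbox": [12, 90, 120, 122], "text": "Sign in",        "description": "button that opens the login form"},
- ]
- </code></pre>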
- </div>
- <!-- </div> -->
- </div>
- </div>
- </div>
- </section>
- <!-- End image carousel -->
- <section class="hero is-small">
- <div class="hero-body">
- <div class="container is-max-desktop">
- <div class="columns is-centered has-text-centered">
- <div class="column is-four-fifths">
- <h2 class="title is-3">Curated Dataset for Interactable Region Detection and Icon Functionality Description</h2>
- <br>
- We curate an interactable icon detection dataset containing 67k unique screenshot images, each labeled with bounding boxes of interactable icons derived from the DOM tree. We first took a uniform sample of 100k popular, publicly available URLs from the ClueWeb dataset, and collected bounding boxes of interactable regions from the DOM tree of each webpage. We also collected 7k icon-description
- pairs for finetuning the caption model.
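- <p>
- The description above only states that the boxes come from the DOM tree; the exact tooling is not specified. As a rough illustration of how such boxes could be collected, here is a sketch using Playwright with a hand-picked selector list (both are assumptions, not the authors' pipeline).
- </p>
- <pre><code># Illustrative sketch: collect interactable-region boxes from a page's DOM.
- # Playwright and the selector list below are assumptions for illustration only.
- from playwright.sync_api import sync_playwright
-
- INTERACTABLE = "a, button, input, select, textarea, [role='button'], [onclick]"
-
- def collect_boxes(url: str, out_image: str):
-     with sync_playwright() as p:
-         browser = p.chromium.launch()
-         page = browser.new_page(viewport={"width": 1280, "height": 800})
-         page.goto(url, wait_until="load")
-         page.screenshot(path=out_image)           # the screenshot paired with the boxes
-         boxes = []
-         for el in page.query_selector_all(INTERACTABLE):
-             bb = el.bounding_box()                # None for hidden elements
-             if bb and bb["width"] > 0 and bb["height"] > 0:
-                 boxes.append((bb["x"], bb["y"], bb["x"] + bb["width"], bb["y"] + bb["height"]))
-         browser.close()
-         return boxes
- </code></pre>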
- <div class="item">
- <img src="static/images/curated_data.png" alt="Species Classification results on iWildCam2020-WILDS (OOD) dataset" />
- <p>
- <b>Examples from the Interactable Region Detection dataset.</b> The bounding boxes are based on the interactable regions extracted from the DOM tree of the webpage.
- </p>
- </div>
- </div>
- </div>
- </div>
- </div>
- </section>
- <!--
- <section class="hero is-light is-small">
- <div class="hero-body">
- <div class="container">
- <div id="results-carousel" class="carousel results-carousel">
- <div class="item item-steve">
- <video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
- <source src="./static/videos/web_5_demo_nocap.mp4"
- type="video/mp4">
- </video>
- </div>
- <div class="item item-chair-tp">
- <video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
- <source src="./static/videos/web_22_demo_nocap.mp4"
- type="video/mp4">
- </video>
- </div>
- </div>
- </div>
- </div>
- </section> -->
-
- <section class="hero is-small">
- <div class="hero-body">
- <div class="container is-max-desktop">
- <div class="columns is-centered has-text-centered">
- <div class="column is-four-fifths">
- <h2 class="title is-3">Results</h2>
- <br>
- We evaluate our model on the ScreenSpot, Mind2Web, and AITW benchmarks, and show that it outperforms the GPT-4V baseline on all of them. We also show that our model with screenshot-only input outperforms GPT-4V baselines that require additional information beyond the screenshot.
- <div class="item">
- <img src="static/images/seeclick.png" alt="seeclick" />
- <img src="static/images/m2w.png" alt="mind2web" />
- <img src="static/images/aitw.png" alt="aitw" />
- </div>
- </div>
- </div>
- </div>
- </div>
- </section>
- <section class="hero is-small"></section>
- <div class="hero-body">
- <div class="container is-max-desktop">
- <div class="columns is-centered has-text-centered">
- <div class="column is-four-fifths">
- <h2 class="title is-3">Plugin-ready for Other Vision Language Models</h2>
- <br>
- To further demonstrate that OmniParser is a plug-and-play choice for off-the-shelf vision language models, we show the performance of OmniParser combined with the recently released vision language models Phi-3.5-V and Llama-3.2-V. As seen in the table, our finetuned interactable region detection (ID) model significantly improves task performance compared to the Grounding DINO model (w.o. ID) with local semantics, across all sub-categories for GPT-4V, Phi-3.5-V and Llama-3.2-V. In addition, the local semantics of icon functionality helps significantly with the performance of every vision language model.
- In the table, LS is short for local semantics of icon functionality, and ID is short for the interactable region detection model we finetune. The setting w.o. ID means we replace the ID model with the original Grounding DINO model, not finetuned on our data, while keeping local semantics. The setting w.o. ID and w.o. LS means we use the Grounding DINO model and additionally drop the icon descriptions from the text prompt.
- <div class="item">
- <img src="static/images/ablation.png" alt="seeclick" />
- </div>
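- <p>
- Since the parsed output is plain text plus an annotated screenshot, swapping the downstream vision language model only changes one callable, and the ablation settings above reduce to simple switches. The sketch below illustrates that plug-in point; the function and parameter names are placeholders (the Element records are those from the sketch in the Abstract section), not the released OmniParser API.
- </p>
- <pre><code># Illustrative plug-in point: the same parsed prompt can drive GPT-4V, Phi-3.5-V, or Llama-3.2-V.
- from typing import Callable
- from PIL import Image
-
- def run_task(task: str, screenshot: Image.Image,
-              vlm: Callable,          # (prompt text, annotated screenshot) -> predicted action
-              parse: Callable,        # ID model, or Grounding DINO for the "w.o. ID" setting
-              overlay_ids: Callable,  # draws boxes and numeric IDs on the screenshot
-              use_local_semantics: bool = True) -> str:
-     elements = parse(screenshot)
-     lines = [f"Task: {task}"]
-     for e in elements:
-         desc = f" description={e.description!r}" if use_local_semantics else ""  # drop for "w.o. LS"
-         lines.append(f"[{e.elem_id}] text={e.text!r}{desc}")
-     return vlm("\n".join(lines), overlay_ids(screenshot, elements))
- </code></pre>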
- </div>
- </div>
- </div>
- </div>
- </section>
- <section class="section">
- <div class="container is-max-desktop">
- <h2 class="title is-3">Demo of Mind2Web Tasks </h2>
-
- <div class="columns is-centered">
-
- <!-- Visual Effects. -->
- <div class="column">
- <div class="content">
- <!-- <h2 class="title is-3">Visual Effects</h2> -->
- <!-- <p>
- Using <i>nerfies</i> you can create fun visual effects. This Dolly zoom effect
- would be impossible without nerfies since it would require going through a wall.
- </p> -->
- <video id="dollyzoom" autoplay controls muted loop playsinline height="100%">
- <source src="./static/videos/web_5_demo_nocap.mp4"
- type="video/mp4">
- </video>
- </div>
- </div>
- <!--/ Visual Effects. -->
-
- <!-- Matting. -->
- <div class="column">
- <!-- <h2 class="title is-3">Matting</h2> -->
- <div class="columns is-centered">
- <div class="column content">
- <!-- <p>
- As a byproduct of our method, we can also solve the matting problem by ignoring
- samples that fall outside of a bounding box during rendering.
- </p> -->
- <video id="matting-video" controls playsinline height="100%">
- <source src="./static/videos/web_22_demo_nocap.mp4"
- type="video/mp4">
- </video>
- </div>
-
- </div>
- </div>
- </div>
- </section>
- <br>
- <!-- BibTex citation -->
- <section class="section" id="BibTeX">
- <div class="container is-max-desktop content">
- <h2 class="title">Citation</h2>
- <!-- Please cite our paper if you use our code, data, model or results: -->
- <!-- <br> -->
- <pre><code>@misc{lu2024omniparserpurevisionbased,
- title={OmniParser for Pure Vision Based GUI Agent},
- author={Yadong Lu and Jianwei Yang and Yelong Shen and Ahmed Awadallah},
- year={2024},
- eprint={2408.00203},
- archivePrefix={arXiv},
- primaryClass={cs.CV},
- url={https://arxiv.org/abs/2408.00203},
- }
- </code></pre>
- </div>
- </section>
- <footer class="footer">
- <div class="container">
- <div class="columns is-centered">
- <div class="column is-8">
- <div class="content">
- <p>
- This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a>, which was adapted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page. You are free to borrow the source code of this website; we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"
- target="_blank">Creative
- Commons Attribution-ShareAlike 4.0 International License</a>.
- </p>
- </div>
- </div>
- </div>
- </div>
- </footer>
- </body>
- <style>
- .buttonGroup {
- text-align: center;
- }
-
- .buttonGroup>button {
- padding: 15px;
- color: white;
- background-color: #363636;
- border-radius: 5px;
- }
-
- .buttonGroup>button:hover {
- box-shadow: 0 0 5px rgba(0, 0, 0, 0.3);
- }
- </style>
- </html>