# The following lines list known AI, data-scraping, and plagiarism-checking crawlers.
# They are disallowed to prevent content from being used for AI training and to avoid
# incorrect plagiarism flags on original academic and written work.
User-agent: AnthropicAI
User-agent: TurnitinBot
User-agent: UnicheckBot
User-agent: PlagScan
User-agent: PlagTracker
User-agent: QueText
User-agent: Plagiarisma
User-agent: Copyscape
User-agent: Scribbr-bot
User-agent: DrillBitBot
User-agent: OpenAI
User-agent: Sogou
User-agent: AhrefsBot
User-agent: SemrushBot
User-agent: ia_archiver
User-agent: AI2Bot
User-agent: Ai2Bot-Dolma
User-agent: Amazonbot
User-agent: anthropic-ai
User-agent: Applebot
User-agent: Applebot-Extended
User-agent: Bytespider
User-agent: CCBot
User-agent: ChatGPT-User
User-agent: Claude-Web
User-agent: ClaudeBot
User-agent: cohere-ai
User-agent: cohere-training-data-crawler
User-agent: Crawlspace
User-agent: Diffbot
User-agent: DuckAssistBot
User-agent: FacebookBot
User-agent: FriendlyCrawler
User-agent: Google-Extended
User-agent: GoogleOther
User-agent: GoogleOther-Image
User-agent: GoogleOther-Video
User-agent: GPTBot
User-agent: iaskspider/2.0
User-agent: ICC-Crawler
User-agent: ImagesiftBot
User-agent: img2dataset
User-agent: ISSCyberRiskCrawler
User-agent: Kangaroo Bot
User-agent: Meta-ExternalAgent
User-agent: Meta-ExternalFetcher
User-agent: OAI-SearchBot
User-agent: omgili
User-agent: omgilibot
User-agent: PanguBot
User-agent: PerplexityBot
User-agent: Perplexity-User-Agent
User-agent: PetalBot
User-agent: Scrapy
User-agent: SemrushBot-OCOB
User-agent: SemrushBot-SWA
User-agent: Sidetrade indexer bot
User-agent: Timpibot
User-agent: Seekr
User-agent: VelenPublicWebCrawler
User-agent: Webzio-Extended
User-agent: YouBot
User-agent: yandex
Disallow: /

# The following group applies to all other user agents. They may crawl the site,
# subject only to the specific path exclusions listed in that group.
# The _assets directory is disallowed to prevent indexing of style, script, and image files.
User-agent: *
Disallow: /_assets/

# Provide the location of the sitemap for all crawlers.
Sitemap: https://prathameshdeshmukh.site/sitemap.xml