diff --git a/packages/app/src/app/design-explore/page.tsx b/packages/app/src/app/design-explore/page.tsx new file mode 100644 index 00000000..4a35d2d1 --- /dev/null +++ b/packages/app/src/app/design-explore/page.tsx @@ -0,0 +1,152 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const VARIANTS = [ + { + id: 'A', + route: '/landing/about', + title: 'Hero + Grid', + description: + 'Classic hero header with brand CTA, 3-column preset cards with tags, and a 6-pill navigation row.', + }, + { + id: 'B', + route: '/landing/methodology', + title: 'Stats + Full Presets', + description: + 'Four stat counters (GPUs, models, frequency, open source) above all 6 curated presets in a 3-column grid.', + }, + { + id: 'C', + route: '/landing/infrastructure', + title: 'Quotes + GPU List', + description: + 'Three executive quote cards (OpenAI, NVIDIA, AMD) with a hardware chip wall showing all benchmarked GPUs.', + }, + { + id: 'D', + route: '/landing/open-source', + title: 'Split Presets + Nav', + description: + 'Two-column preset layout split by NVIDIA vs Cross-Vendor, plus a 3-column navigation directory.', + }, + { + id: 'E', + route: '/landing/partners', + title: 'Supporters Wall', + description: + 'Full 22-org supporter chip wall with a featured OpenAI quote in a large brand-tinted banner.', + }, + { + id: 'F', + route: '/landing/careers', + title: 'Quote Cards + Compact Presets', + description: + 'Three side-by-side quote cards with pill-shaped preset links and inline article/newsletter links.', + }, + { + id: 'G', + route: '/landing/changelog', + title: 'Feature Cards + Quote Banner', + description: + 'Four large feature cards (Dashboard, Comparisons, Supporters, Articles) with hover arrows and a quote banner.', + }, + { + id: 'H', + route: '/landing/status', + title: 'Full Preset Cards + Tags', + description: + 'All 6 presets as tagged cards with brand accent bars (matching OG CuratedViewCard style) plus an NVIDIA quote.', + }, + { + id: 'I', + route: '/landing/contact', + title: 'Section Directory', + description: + 'Two inline quote banners above a 6-card section directory linking to Dashboard, Comparisons, Supporters, Articles, Reliability, and GitHub.', + }, + { + id: 'J', + route: '/landing/faq', + title: 'GPU Focus + Dense Layout', + description: + 'GPU-centric hero with hardware stat cards, a chip wall, dense 3x2 preset grid, and a Jensen Huang quote.', + }, +]; + +export const metadata: Metadata = { + title: 'Design Exploration — Landing Page Variants', + description: + 'Explore 10 landing page design variants for InferenceX, each using the glass card aesthetic with real content.', + alternates: { canonical: `${SITE_URL}/design-explore` }, +}; + +export default function DesignExplorePage() { + return ( +
+    <main>
+      <section>
+        <p>Design Exploration</p>
+        <h1>10 landing page variants.</h1>
+        <p>
+          Each variant uses the glass card aesthetic from the land acknowledgement page while
+          showcasing the real InferenceX landing page content — quotes, preset links, GPU
+          hardware, navigation, and CTAs — in a different layout.
+        </p>
+      </section>
+
+      <section>
+        {VARIANTS.map((v) => (
+          <Link key={v.id} href={v.route}>
+            <Card>
+              <span>{v.id}</span>
+              <h2>{v.title}</h2>
+              <ArrowRight />
+              <p>{v.description}</p>
+              <span>{v.route}</span>
+            </Card>
+          </Link>
+        ))}
+      </section>
+
+      <footer>
+        <Link href="/">← Current Landing Page</Link>
+        <Link href="/land-acknowledgement">Land Acknowledgement (style reference) →</Link>
+      </footer>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/about/page.tsx b/packages/app/src/app/landing/about/page.tsx new file mode 100644 index 00000000..447f5e0d --- /dev/null +++ b/packages/app/src/app/landing/about/page.tsx @@ -0,0 +1,128 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const PRESETS = [ + { + title: 'GB200 NVL72 vs B200 — Multi vs Single Node', + description: 'GB200 NVL72 Dynamo TRT vs B200 Dynamo TRT on DeepSeek R1 (8k/1k) at FP4.', + href: '/inference?preset=gb200-vs-b200', + tags: ['DeepSeek', 'GB200', 'B200', 'FP4'], + }, + { + title: 'B200 vs H200 — Blackwell vs Hopper', + description: + 'Blackwell B200 vs Hopper H200 Dynamo TRT throughput per GPU on DeepSeek R1 at FP8.', + href: '/inference?preset=b200-vs-h200', + tags: ['DeepSeek', 'B200', 'H200', 'FP8'], + }, + { + title: 'AMD MI300X → MI325X → MI355X', + description: 'Three generations of AMD Instinct on SGLang at FP8.', + href: '/inference?preset=amd-generations', + tags: ['DeepSeek', 'MI300X', 'MI355X', 'SGLang'], + }, +]; + +const LINKS = [ + { label: 'Dashboard', href: '/inference' }, + { label: 'Supporters', href: '/quotes' }, + { label: 'Articles', href: '/blog' }, + { label: 'GPU Reliability', href: '/reliability' }, + { label: 'GitHub', href: 'https://github.com/SemiAnalysisAI/InferenceX' }, + { label: 'Newsletter', href: 'https://newsletter.semianalysis.com' }, +]; + +export const metadata: Metadata = { + title: 'Landing Variant A — Hero + Grid', + description: 'InferenceX landing page variant with hero header and preset grid layout.', + alternates: { canonical: `${SITE_URL}/landing/about` }, +}; + +export default function VariantA() { + return ( +
+    <main>
+      {/* Hero */}
+      <section>
+        <p>Open Source Benchmark</p>
+        <h1>The open standard for ML inference benchmarking.</h1>
+        <p>
+          Compare AI inference performance across GPUs and frameworks. Real benchmarks on NVIDIA
+          GB200, B200, AMD MI355X, and more. Free, open-source, continuously updated by
+          SemiAnalysis.
+        </p>
+        <Link href="/inference">
+          Open Dashboard
+          <ArrowRight />
+        </Link>
+      </section>
+
+      {/* Quick Comparisons */}
+      <section>
+        {PRESETS.map((p) => (
+          <Link key={p.href} href={p.href}>
+            <Card>
+              <h2>{p.title}</h2>
+              <ArrowRight />
+              <p>{p.description}</p>
+              <div>
+                {p.tags.map((t) => (
+                  <span key={t}>{t}</span>
+                ))}
+              </div>
+            </Card>
+          </Link>
+        ))}
+      </section>
+
+      {/* Navigation Links */}
+      <nav>
+        {LINKS.map((l) => (
+          <Link key={l.href} href={l.href}>
+            {l.label}
+          </Link>
+        ))}
+      </nav>
+
+      <p>
+        Continuous open-source inference benchmarking. Real-world, reproducible, auditable
+        performance data trusted by trillion dollar AI infrastructure operators like OpenAI,
+        Oracle, Microsoft, etc.
+      </p>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/careers/page.tsx b/packages/app/src/app/landing/careers/page.tsx new file mode 100644 index 00000000..704fecb4 --- /dev/null +++ b/packages/app/src/app/landing/careers/page.tsx @@ -0,0 +1,126 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight, Quote } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const QUOTES = [ + { + text: 'Inference demand is growing exponentially, driven by long-context reasoning. NVIDIA Grace Blackwell NVL72 was invented for this new era of thinking AI.', + name: 'Jensen Huang', + title: 'Founder & CEO, NVIDIA', + org: 'NVIDIA', + }, + { + text: 'Our mission at Azure is to give customers the most performant, efficient, and cost-effective cloud for AI. SemiAnalysis InferenceMAX supports that mission by providing transparent, reproducible benchmarks.', + name: 'Scott Guthrie', + title: 'EVP Cloud & AI, Microsoft', + org: 'Microsoft', + }, + { + text: "Speed is the moat. InferenceMAX's nightly benchmarks match the speed of improvement of the AMD software stack.", + name: 'Anush Elangovan', + title: 'VP GPU Software, AMD', + org: 'AMD', + }, +]; + +const PRESETS = [ + { title: 'GB200 vs B200', href: '/inference?preset=gb200-vs-b200' }, + { title: 'B200 vs H200', href: '/inference?preset=b200-vs-h200' }, + { title: 'AMD Generations', href: '/inference?preset=amd-generations' }, + { title: 'H100 vs GB300', href: '/inference?preset=h100-vs-gb300-disagg' }, + { title: 'Disagg Cross-Vendor', href: '/inference?preset=disagg-b200-vs-mi355x' }, + { title: 'MI355X Timeline', href: '/inference?preset=mi355x-sglang-disagg-timeline' }, +]; + +export const metadata: Metadata = { + title: 'Landing Variant F — Quote Cards + Compact Presets', + description: 'InferenceX landing page variant with three quote cards and compact preset links.', + alternates: { canonical: `${SITE_URL}/landing/careers` }, +}; + +export default function VariantF() { + return ( +
+    <main>
+      <section>
+        <p>InferenceX by SemiAnalysis</p>
+        <h1>Open Source Continuous Inference Benchmark</h1>
+        <p>
+          Compare AI inference performance across GPUs and frameworks. Real benchmarks on NVIDIA
+          GB200, B200, AMD MI355X, and more. Trusted by trillion dollar AI infrastructure
+          operators like OpenAI, Oracle, Microsoft.
+        </p>
+        <Link href="/inference">
+          Open Dashboard
+          <ArrowRight />
+        </Link>
+      </section>
+
+      {/* Quote cards */}
+      <section>
+        {QUOTES.map((q) => (
+          <Card key={q.name}>
+            <Quote />
+            <p>“{q.text}”</p>
+            <div>
+              <p>{q.name}</p>
+              <p>{q.title}</p>
+            </div>
+          </Card>
+        ))}
+      </section>
+
+      {/* Compact preset links */}
+      <section>
+        <h2>Quick Comparisons</h2>
+        <div>
+          {PRESETS.map((p) => (
+            <Link key={p.href} href={p.href}>
+              {p.title}
+              <ArrowRight />
+            </Link>
+          ))}
+        </div>
+      </section>
+
+      <p>
+        Continuous open-source inference benchmarking. Real-world, reproducible, auditable
+        performance data.{' '}
+        <Link href="/blog">Read our articles</Link>{' '}
+        or{' '}
+        <Link href="https://newsletter.semianalysis.com">subscribe to the newsletter</Link>.
+      </p>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/changelog/page.tsx b/packages/app/src/app/landing/changelog/page.tsx new file mode 100644 index 00000000..0c5d7274 --- /dev/null +++ b/packages/app/src/app/landing/changelog/page.tsx @@ -0,0 +1,127 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const HERO_QUOTE = { + text: "InferenceMAX's nightly results highlight the rapid pace of progress in the AMD software stack. It's exciting to witness the birth of an open project that provides a tied feedback loop between what the software team works on and how it affects specific ML use cases.", + name: 'Quentin Colombet', + title: 'Senior Director, AMD', +}; + +const FEATURES = [ + { + label: 'Full Dashboard', + title: 'Every Model, GPU & Metric', + description: + 'Fully configurable inference benchmark charts with date ranges, concurrency sweeps, and raw data export. Compare NVIDIA B200, H200, H100, AMD MI355X, MI325X, MI300X and more.', + href: '/inference', + }, + { + label: 'Quick Comparisons', + title: 'Curated GPU Benchmarks', + description: + 'Jump straight into the most popular GPU inference benchmark comparisons — GB200 vs B200, AMD generations, disaggregated serving, and more.', + href: '/inference?preset=gb200-vs-b200', + }, + { + label: 'Supporters', + title: '36+ Industry Quotes', + description: + 'Endorsed by executives from OpenAI, NVIDIA, AMD, Microsoft, Meta, Hugging Face, and 30 more organizations building AI infrastructure.', + href: '/quotes', + }, + { + label: 'Articles', + title: 'Deep-Dive Analysis', + description: + 'In-depth write-ups on GPU performance, benchmark methodology, and inference optimization from the SemiAnalysis research team.', + href: '/blog', + }, +]; + +const BOTTOM_LINKS = [ + { label: 'GitHub — Benchmarks', href: 'https://github.com/SemiAnalysisAI/InferenceX' }, + { label: 'GitHub — Frontend', href: 'https://github.com/SemiAnalysisAI/InferenceX-app' }, + { label: 'Newsletter', href: 'https://newsletter.semianalysis.com' }, + { label: 'GPU Reliability', href: '/reliability' }, + { label: 'SemiAnalysis', href: 'https://semianalysis.com' }, +]; + +export const metadata: Metadata = { + title: 'Landing Variant G — Feature Cards + Quote Banner', + description: 'InferenceX landing page variant with feature cards and quote banner.', + alternates: { canonical: `${SITE_URL}/landing/changelog` }, +}; + +export default function VariantG() { + return ( +
+    <main>
+      <section>
+        <p>Open Source Benchmark</p>
+        <h1>AI Inference Benchmark by SemiAnalysis</h1>
+        <p>
+          InferenceX is the open-source AI inference benchmark that matches the rapid pace of
+          modern AI development. Powered by one of the largest open-source GPU CI/CD fleets with
+          NVIDIA GB200, AMD MI355X & many more.
+        </p>
+      </section>
+
+      {/* Quote banner */}
+      <Card>
+        <p>“{HERO_QUOTE.text}”</p>
+        <p>— {HERO_QUOTE.name}, {HERO_QUOTE.title}</p>
+      </Card>
+
+      {/* Feature cards */}
+      <section>
+        {FEATURES.map((f) => (
+          <Link key={f.href} href={f.href}>
+            <Card>
+              <p>{f.label}</p>
+              <h2>{f.title}</h2>
+              <p>{f.description}</p>
+              <span>
+                Explore
+                <ArrowRight />
+              </span>
+            </Card>
+          </Link>
+        ))}
+      </section>
+
+      {/* Bottom links */}
+      <nav>
+        {BOTTOM_LINKS.map((l) => (
+          <Link key={l.href} href={l.href}>
+            {l.label}
+          </Link>
+        ))}
+      </nav>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/contact/page.tsx b/packages/app/src/app/landing/contact/page.tsx new file mode 100644 index 00000000..d4c2d5aa --- /dev/null +++ b/packages/app/src/app/landing/contact/page.tsx @@ -0,0 +1,149 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight, Quote } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const QUOTES = [ + { + text: 'Open collaboration is driving the next era of AI innovation. The open-source InferenceMAX benchmark gives the community transparent, nightly results that inspire trust and accelerate progress.', + name: 'Dr. Lisa Su', + title: 'Chair and CEO, AMD', + }, + { + text: "As we build systems at unprecedented scale, it's critical for the ML community to have open, transparent benchmarks that reflect how inference really performs.", + name: 'Peter Hoeschele', + title: 'VP Infrastructure, OpenAI Stargate', + }, +]; + +const SECTIONS = [ + { + label: 'Benchmark', + title: 'Full Dashboard', + description: + 'Every model, GPU, framework, and metric. Fully configurable charts with date ranges, concurrency sweeps, and raw data export.', + href: '/inference', + cta: 'Open Dashboard', + }, + { + label: 'Compare', + title: 'Quick Comparisons', + description: + 'Jump straight into the most popular GPU inference benchmark comparisons, curated and ready to explore.', + href: '/inference?preset=gb200-vs-b200', + cta: 'GB200 vs B200', + }, + { + label: 'Community', + title: 'Supporters', + description: + 'Endorsed by 36+ industry leaders including OpenAI, NVIDIA, AMD, Microsoft, Meta, Hugging Face, and more.', + href: '/quotes', + cta: 'See Supporters', + }, + { + label: 'Insights', + title: 'Articles & Analysis', + description: + 'In-depth write-ups on GPU performance, benchmark methodology, and inference optimization.', + href: '/blog', + cta: 'Read Articles', + }, + { + label: 'Reliability', + title: 'GPU Reliability Data', + description: 'Production reliability metrics for GPU hardware across our benchmark clusters.', + href: '/reliability', + cta: 'View Data', + }, + { + label: 'Open Source', + title: 'Contribute on GitHub', + description: + 'The benchmark runner, dashboard, and all configs are open source under Apache 2.0. Fork, fix, extend.', + href: 'https://github.com/SemiAnalysisAI/InferenceX', + cta: 'Star on GitHub', + }, +]; + +export const metadata: Metadata = { + title: 'Landing Variant I — Section Directory', + description: 'InferenceX landing page variant with a full section directory and inline quotes.', + alternates: { canonical: `${SITE_URL}/landing/contact` }, +}; + +export default function VariantI() { + return ( +
+    <main>
+      <section>
+        <p>InferenceX by SemiAnalysis</p>
+        <h1>The open-source AI inference benchmark.</h1>
+        <p>
+          Compare AI inference performance across GPUs and frameworks. Real benchmarks on NVIDIA
+          GB200, B200, AMD MI355X, and more. Free, open-source, continuously updated.
+        </p>
+      </section>
+
+      {/* Inline quotes */}
+      <section>
+        {QUOTES.map((q) => (
+          <Card key={q.name}>
+            <Quote />
+            <p>“{q.text}”</p>
+            <p>— {q.name}, {q.title}</p>
+          </Card>
+        ))}
+      </section>
+
+      {/* Section directory */}
+      <section>
+        {SECTIONS.map((s) => (
+          <Link key={s.href} href={s.href}>
+            <Card>
+              <p>{s.label}</p>
+              <h2>{s.title}</h2>
+              <p>{s.description}</p>
+              <span>
+                {s.cta}
+                <ArrowRight />
+              </span>
+            </Card>
+          </Link>
+        ))}
+      </section>
+
+      <p>
+        Continuous open-source inference benchmarking by{' '}
+        <Link href="https://semianalysis.com">SemiAnalysis</Link>. Subscribe to the{' '}
+        <Link href="https://newsletter.semianalysis.com">newsletter</Link>{' '}
+        for weekly updates.
+      </p>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/faq/page.tsx b/packages/app/src/app/landing/faq/page.tsx new file mode 100644 index 00000000..9d01221c --- /dev/null +++ b/packages/app/src/app/landing/faq/page.tsx @@ -0,0 +1,183 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight, Quote } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const HERO_STATS = [ + { value: 'NVIDIA GB200', sub: 'B200 · H200 · H100' }, + { value: 'AMD MI355X', sub: 'MI325X · MI300X' }, +]; + +const GPUS = [ + 'NVIDIA GB200 NVL72', + 'NVIDIA GB300', + 'NVIDIA B200', + 'NVIDIA H200', + 'NVIDIA H100', + 'AMD MI355X', + 'AMD MI325X', + 'AMD MI300X', +]; + +const PRESETS = [ + { + title: 'GB200 NVL72 vs B200', + href: '/inference?preset=gb200-vs-b200', + description: 'Multi vs Single Node at FP4.', + }, + { + title: 'B200 vs H200', + href: '/inference?preset=b200-vs-h200', + description: 'Blackwell vs Hopper at FP8.', + }, + { + title: 'AMD MI300X → MI355X', + href: '/inference?preset=amd-generations', + description: 'Three AMD generations.', + }, + { + title: 'H100 vs GB300 Disagg', + href: '/inference?preset=h100-vs-gb300-disagg', + description: 'Cross-generation disagg.', + }, + { + title: 'Disagg B200 vs MI355X', + href: '/inference?preset=disagg-b200-vs-mi355x', + description: 'Cross-vendor disagg.', + }, + { + title: 'MI355X Over Time', + href: '/inference?preset=mi355x-sglang-disagg-timeline', + description: 'Throughput timeline.', + }, +]; + +const QUOTE = { + text: 'Inference demand is growing exponentially, driven by long-context reasoning. NVIDIA Grace Blackwell NVL72 was invented for this new era of thinking AI. By benchmarking frequently, InferenceMAX gives the industry a transparent view of LLM inference performance on real-world workloads.', + name: 'Jensen Huang', + title: 'Founder & CEO, NVIDIA', +}; + +export const metadata: Metadata = { + title: 'Landing Variant J — GPU Focus + Dense Layout', + description: + 'InferenceX landing page variant with GPU-centric layout, dense presets, and hardware chips.', + alternates: { canonical: `${SITE_URL}/landing/faq` }, +}; + +export default function VariantJ() { + return ( +
+    <main>
+      <section>
+        <p>InferenceX</p>
+        <h1>Real-world GPU inference benchmarks. Updated daily.</h1>
+        <div>
+          {HERO_STATS.map((s) => (
+            <Card key={s.value}>
+              <p>{s.value}</p>
+              <p>{s.sub}</p>
+            </Card>
+          ))}
+        </div>
+      </section>
+
+      {/* GPU chips */}
+      <section>
+        <h2>Hardware We Benchmark</h2>
+        <div>
+          {GPUS.map((g) => (
+            <span key={g}>{g}</span>
+          ))}
+        </div>
+      </section>
+
+      {/* Dense presets grid */}
+      <section>
+        {PRESETS.map((p) => (
+          <Link key={p.href} href={p.href}>
+            <Card>
+              <h3>{p.title}</h3>
+              <ArrowRight />
+              <p>{p.description}</p>
+            </Card>
+          </Link>
+        ))}
+      </section>
+
+      {/* Quote */}
+      <Card>
+        <Quote />
+        <p>“{QUOTE.text}”</p>
+        <p>— {QUOTE.name}, {QUOTE.title}</p>
+      </Card>
+
+      {/* Bottom nav */}
+      <nav>
+        <Link href="/inference">
+          Full Dashboard
+          <ArrowRight />
+        </Link>
+        <Link href="/quotes">Supporters →</Link>
+        <Link href="/blog">Articles →</Link>
+        <Link href="/reliability">Reliability →</Link>
+        <Link href="https://github.com/SemiAnalysisAI/InferenceX">GitHub →</Link>
+        <Link href="https://newsletter.semianalysis.com">Newsletter →</Link>
+      </nav>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/infrastructure/page.tsx b/packages/app/src/app/landing/infrastructure/page.tsx new file mode 100644 index 00000000..3513d248 --- /dev/null +++ b/packages/app/src/app/landing/infrastructure/page.tsx @@ -0,0 +1,128 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const QUOTES = [ + { + text: "As we build systems at unprecedented scale, it's critical for the ML community to have open, transparent benchmarks that reflect how inference really performs across hardware and software.", + name: 'Peter Hoeschele', + title: 'VP of Infrastructure, OpenAI Stargate', + org: 'OpenAI', + }, + { + text: 'Inference demand is growing exponentially, driven by long-context reasoning. NVIDIA Grace Blackwell NVL72 was invented for this new era of thinking AI.', + name: 'Jensen Huang', + title: 'Founder & CEO, NVIDIA', + org: 'NVIDIA', + }, + { + text: 'Open collaboration is driving the next era of AI innovation. The open-source InferenceMAX benchmark gives the community transparent, nightly results.', + name: 'Dr. Lisa Su', + title: 'Chair and CEO, AMD', + org: 'AMD', + }, +]; + +const GPUS = [ + 'NVIDIA GB200 NVL72', + 'NVIDIA B200', + 'NVIDIA H200', + 'NVIDIA H100', + 'AMD MI355X', + 'AMD MI325X', + 'AMD MI300X', +]; + +export const metadata: Metadata = { + title: 'Landing Variant C — Quotes + GPU List', + description: 'InferenceX landing page variant featuring executive quotes and GPU hardware list.', + alternates: { canonical: `${SITE_URL}/landing/infrastructure` }, +}; + +export default function VariantC() { + return ( +
+    <main>
+      {/* Hero */}
+      <section>
+        <p>Trusted by Industry Leaders</p>
+        <h1>Trusted by GigaWatt Token Factories</h1>
+        <p>
+          Compare AI inference performance across GPUs and frameworks. Real benchmarks on NVIDIA
+          GB200, B200, AMD MI355X, and more. Free, open-source, continuously updated.
+        </p>
+      </section>
+
+      {/* Executive quotes */}
+      <section>
+        {QUOTES.map((q) => (
+          <Card key={q.org}>
+            <p>{q.org}</p>
+            <p>“{q.text}”</p>
+            <div>
+              <p>{q.name}</p>
+              <p>{q.title}</p>
+            </div>
+          </Card>
+        ))}
+      </section>
+
+      {/* GPU hardware list */}
+      <section>
+        <h2>Hardware We Benchmark</h2>
+        <div>
+          {GPUS.map((gpu) => (
+            <span key={gpu}>{gpu}</span>
+          ))}
+        </div>
+      </section>
+
+      {/* CTAs */}
+      <nav>
+        <Link href="/inference">
+          Open Dashboard
+          <ArrowRight />
+        </Link>
+        <Link href="/quotes">See all 36 supporters →</Link>
+        <Link href="https://github.com/SemiAnalysisAI/InferenceX">Star on GitHub →</Link>
+      </nav>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/methodology/page.tsx b/packages/app/src/app/landing/methodology/page.tsx new file mode 100644 index 00000000..151562c5 --- /dev/null +++ b/packages/app/src/app/landing/methodology/page.tsx @@ -0,0 +1,135 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const STATS = [ + { value: '6+', label: 'GPU Architectures' }, + { value: '10+', label: 'Models Benchmarked' }, + { value: 'Daily', label: 'Update Frequency' }, + { value: '100%', label: 'Open Source' }, +]; + +const PRESETS = [ + { + title: 'GB200 NVL72 vs B200', + description: 'Multi vs Single Node — Dynamo TRT on DeepSeek R1 at FP4.', + href: '/inference?preset=gb200-vs-b200', + }, + { + title: 'B200 vs H200', + description: 'Blackwell vs Hopper — throughput per GPU on DeepSeek R1 at FP8.', + href: '/inference?preset=b200-vs-h200', + }, + { + title: 'AMD MI300X → MI325X → MI355X', + description: 'Three generations of AMD Instinct on SGLang at FP8.', + href: '/inference?preset=amd-generations', + }, + { + title: 'H100 vs GB300 Disagg', + description: 'H100 FP8 vs GB300 FP8 vs GB300 FP4 disagg on DeepSeek R1.', + href: '/inference?preset=h100-vs-gb300-disagg', + }, + { + title: 'Disagg B200 vs MI355X vs B200 TRT', + description: 'Cross-vendor disaggregated serving comparison at FP8.', + href: '/inference?preset=disagg-b200-vs-mi355x', + }, + { + title: 'MI355X SGLang Disagg Over Time', + description: 'Tracks throughput improvements over time on DeepSeek R1 FP8.', + href: '/inference?preset=mi355x-sglang-disagg-timeline', + }, +]; + +export const metadata: Metadata = { + title: 'Landing Variant B — Stats + Full Presets', + description: 'InferenceX landing page variant with stat counters and all 6 curated presets.', + alternates: { canonical: `${SITE_URL}/landing/methodology` }, +}; + +export default function VariantB() { + return ( +
+    <main>
+      {/* Hero */}
+      <section>
+        <p>By SemiAnalysis</p>
+        <h1>Open Source Continuous Inference Benchmark</h1>
+        <p>
+          InferenceX is the open-source AI inference benchmark that matches the rapid pace of
+          modern AI development. Powered by one of the largest open-source GPU CI/CD fleets with
+          NVIDIA GB200, AMD MI355X & many more.
+        </p>
+      </section>
+
+      {/* Stats bar */}
+      <section>
+        {STATS.map((s) => (
+          <Card key={s.label}>
+            <p>{s.value}</p>
+            <p>{s.label}</p>
+          </Card>
+        ))}
+      </section>
+
+      {/* All 6 presets */}
+      <section>
+        <h2>Quick Comparisons</h2>
+        <div>
+          {PRESETS.map((p) => (
+            <Link key={p.href} href={p.href}>
+              <Card>
+                <h3>{p.title}</h3>
+                <ArrowRight />
+                <p>{p.description}</p>
+              </Card>
+            </Link>
+          ))}
+        </div>
+      </section>
+
+      {/* CTA */}
+      <nav>
+        <Link href="/inference">
+          Open Full Dashboard
+          <ArrowRight />
+        </Link>
+        <Link href="/quotes">See what supporters say →</Link>
+      </nav>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/open-source/page.tsx b/packages/app/src/app/landing/open-source/page.tsx new file mode 100644 index 00000000..68273ddd --- /dev/null +++ b/packages/app/src/app/landing/open-source/page.tsx @@ -0,0 +1,166 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const PRESETS_LEFT = [ + { + title: 'GB200 NVL72 vs B200', + description: 'Multi vs Single Node — Dynamo TRT on DeepSeek R1 at FP4.', + href: '/inference?preset=gb200-vs-b200', + }, + { + title: 'B200 vs H200', + description: 'Blackwell vs Hopper — throughput per GPU at FP8.', + href: '/inference?preset=b200-vs-h200', + }, + { + title: 'AMD MI300X → MI355X', + description: 'Three generations of AMD Instinct on SGLang at FP8.', + href: '/inference?preset=amd-generations', + }, +]; + +const PRESETS_RIGHT = [ + { + title: 'H100 vs GB300 Disagg', + description: 'Cross-generation disagg comparison on DeepSeek R1.', + href: '/inference?preset=h100-vs-gb300-disagg', + }, + { + title: 'Disagg B200 vs MI355X', + description: 'Cross-vendor disaggregated serving at FP8.', + href: '/inference?preset=disagg-b200-vs-mi355x', + }, + { + title: 'MI355X Over Time', + description: 'SGLang disagg throughput improvements on DeepSeek R1.', + href: '/inference?preset=mi355x-sglang-disagg-timeline', + }, +]; + +const NAV_SECTIONS = [ + { + heading: 'Explore', + links: [ + { label: 'Full Dashboard', href: '/inference' }, + { label: 'Supporters', href: '/quotes' }, + { label: 'Articles', href: '/blog' }, + ], + }, + { + heading: 'Contribute', + links: [ + { label: 'GitHub — Benchmarks', href: 'https://github.com/SemiAnalysisAI/InferenceX' }, + { label: 'GitHub — Frontend', href: 'https://github.com/SemiAnalysisAI/InferenceX-app' }, + { label: 'GPU Reliability', href: '/reliability' }, + ], + }, + { + heading: 'SemiAnalysis', + links: [ + { label: 'Main Site', href: 'https://semianalysis.com' }, + { label: 'Newsletter', href: 'https://newsletter.semianalysis.com' }, + { label: 'About', href: 'https://semianalysis.com/about/' }, + ], + }, +]; + +export const metadata: Metadata = { + title: 'Landing Variant D — Split Presets + Nav', + description: 'InferenceX landing page variant with two-column presets and navigation sections.', + alternates: { canonical: `${SITE_URL}/landing/open-source` }, +}; + +export default function VariantD() { + return ( +
+    <main>
+      <section>
+        <p>InferenceX by SemiAnalysis</p>
+        <h1>Every model, GPU, framework, and metric.</h1>
+        <p>
+          Fully configurable inference benchmark charts with date ranges, concurrency sweeps,
+          and raw data export. Compare NVIDIA B200, H200, H100, AMD MI355X, MI325X, MI300X and
+          more across DeepSeek, gpt-oss, Llama, Qwen, and other models.
+        </p>
+      </section>
+
+      {/* Two-column presets */}
+      <section>
+        <div>
+          <h2>NVIDIA Comparisons</h2>
+          {PRESETS_LEFT.map((p) => (
+            <Link key={p.href} href={p.href}>
+              <Card>
+                <h3>{p.title}</h3>
+                <p>{p.description}</p>
+                <ArrowRight />
+              </Card>
+            </Link>
+          ))}
+        </div>
+        <div>
+          <h2>Cross-Vendor & Timeline</h2>
+          {PRESETS_RIGHT.map((p) => (
+            <Link key={p.href} href={p.href}>
+              <Card>
+                <h3>{p.title}</h3>
+                <p>{p.description}</p>
+                <ArrowRight />
+              </Card>
+            </Link>
+          ))}
+        </div>
+      </section>
+
+      {/* Nav sections */}
+      <section>
+        {NAV_SECTIONS.map((s) => (
+          <div key={s.heading}>
+            <h2>{s.heading}</h2>
+            <ul>
+              {s.links.map((l) => (
+                <li key={l.href}>
+                  <Link href={l.href}>{l.label}</Link>
+                </li>
+              ))}
+            </ul>
+          </div>
+        ))}
+      </section>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/partners/page.tsx b/packages/app/src/app/landing/partners/page.tsx new file mode 100644 index 00000000..389df8c7 --- /dev/null +++ b/packages/app/src/app/landing/partners/page.tsx @@ -0,0 +1,127 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight, Quote } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const SUPPORTERS = [ + 'OpenAI', + 'NVIDIA', + 'AMD', + 'Microsoft', + 'Together AI', + 'vLLM', + 'GPU Mode', + 'PyTorch Foundation', + 'Oracle', + 'CoreWeave', + 'Nebius', + 'Crusoe', + 'TensorWave', + 'SGLang', + 'WEKA', + 'Stanford', + 'Core42', + 'Meta', + 'Hugging Face', + 'UC Berkeley', + 'Lambda', + 'UC San Diego', +]; + +const FEATURED_QUOTE = { + text: "As we build systems at unprecedented scale, it's critical for the ML community to have open, transparent benchmarks that reflect how inference really performs across hardware and software. InferenceMAX's head-to-head benchmarks cut through the noise and provide a living picture of token throughput, performance per dollar, and tokens per Megawatt.", + name: 'Peter Hoeschele', + title: 'VP of Infrastructure and Industrial Compute, OpenAI Stargate', +}; + +export const metadata: Metadata = { + title: 'Landing Variant E — Supporters Wall', + description: 'InferenceX landing page variant with a supporter logo wall and featured quote.', + alternates: { canonical: `${SITE_URL}/landing/partners` }, +}; + +export default function VariantE() { + return ( +
+    <main>
+      <section>
+        <p>InferenceX</p>
+        <h1>Trusted by the companies building AI infrastructure.</h1>
+        <p>
+          Compare AI inference performance across GPUs and frameworks. Real benchmarks on NVIDIA
+          GB200, B200, AMD MI355X, and more. Free, open-source, continuously updated.
+        </p>
+      </section>
+
+      {/* Supporter wall */}
+      <section>
+        <h2>Supported By 36+ Industry Leaders</h2>
+        <div>
+          {SUPPORTERS.map((org) => (
+            <span key={org}>{org}</span>
+          ))}
+        </div>
+      </section>
+
+      {/* Featured quote */}
+      <Card>
+        <Quote />
+        <p>“{FEATURED_QUOTE.text}”</p>
+        <div>
+          <p>{FEATURED_QUOTE.name}</p>
+          <p>{FEATURED_QUOTE.title}</p>
+        </div>
+      </Card>
+
+      {/* CTAs */}
+      <nav>
+        <Link href="/inference">
+          Open Dashboard
+          <ArrowRight />
+        </Link>
+        <Link href="/quotes">See all supporters →</Link>
+        <Link href="https://github.com/SemiAnalysisAI/InferenceX">Star on GitHub →</Link>
+        <Link href="/blog">Read Articles →</Link>
+      </nav>
+    </main>
+ ); +} diff --git a/packages/app/src/app/landing/status/page.tsx b/packages/app/src/app/landing/status/page.tsx new file mode 100644 index 00000000..06549ee0 --- /dev/null +++ b/packages/app/src/app/landing/status/page.tsx @@ -0,0 +1,158 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { ArrowRight, Quote } from 'lucide-react'; + +import { Card } from '@/components/ui/card'; +import { SITE_URL } from '@semianalysisai/inferencex-constants'; + +const PRESETS = [ + { + title: 'GB200 NVL72 vs B200 — Multi vs Single Node', + description: 'GB200 NVL72 Dynamo TRT vs B200 Dynamo TRT on DeepSeek R1 (8k/1k) at FP4.', + href: '/inference?preset=gb200-vs-b200', + tags: ['DeepSeek', 'GB200', 'B200', 'Dynamo', 'FP4', 'NVL72'], + }, + { + title: 'B200 vs H200 — Blackwell vs Hopper', + description: + 'Blackwell B200 vs Hopper H200 Dynamo TRT throughput per GPU on DeepSeek R1 at FP8.', + href: '/inference?preset=b200-vs-h200', + tags: ['DeepSeek', 'B200', 'H200', 'Dynamo', 'FP8'], + }, + { + title: 'AMD MI300X → MI325X → MI355X', + description: + 'Three generations of AMD Instinct on SGLang at FP8. Generational throughput scaling.', + href: '/inference?preset=amd-generations', + tags: ['DeepSeek', 'MI300X', 'MI325X', 'MI355X', 'SGLang', 'FP8'], + }, + { + title: 'H100 vs GB300 Disagg — DeepSeek', + description: 'H100 FP8 disagg vs GB300 FP8 disagg vs GB300 FP4 disagg on DeepSeek R1.', + href: '/inference?preset=h100-vs-gb300-disagg', + tags: ['DeepSeek', 'H100', 'GB300', 'Disagg', 'FP8', 'FP4'], + }, + { + title: 'Disagg B200 SGLang vs MI355X vs B200 TRT', + description: + 'Disaggregated B200 Dynamo SGLang vs MI355X MoRI SGLang vs B200 Dynamo TRT at FP8.', + href: '/inference?preset=disagg-b200-vs-mi355x', + tags: ['DeepSeek', 'B200', 'MI355X', 'Disagg', 'FP8'], + }, + { + title: 'MI355X SGLang Disagg Over Time', + description: 'MI355X SGLang disaggregated inference on DeepSeek R1 FP8 — throughput over time.', + href: '/inference?preset=mi355x-sglang-disagg-timeline', + tags: ['DeepSeek', 'MI355X', 'SGLang', 'FP8', 'Timeline'], + }, +]; + +const FEATURED_QUOTE = { + text: 'InferenceMAX highlights workloads that the ML community cares about. At NVIDIA, we welcome these comparisons because they underscore the advantage of our full-stack approach — from GPU hardware to NVLink networking to NVL72 Rack Scale to Dynamo disaggregated serving.', + name: 'Ian Buck', + title: 'VP & GM, Hyperscale, NVIDIA & Inventor of CUDA', +}; + +export const metadata: Metadata = { + title: 'Landing Variant H — Full Preset Cards + Tags', + description: + 'InferenceX landing page variant with full-width tagged preset cards mirroring the OG layout.', + alternates: { canonical: `${SITE_URL}/landing/status` }, +}; + +export default function VariantH() { + return ( +
+    <main>
+      <section>
+        <p>InferenceX</p>
+        <h1>Compare GPU inference performance. For real.</h1>
+        <p>
+          Every model, GPU, framework, and metric. Fully configurable inference benchmark charts
+          with date ranges, concurrency sweeps, and raw data export.
+        </p>
+        <div>
+          <Link href="/inference">
+            Open Dashboard
+            <ArrowRight />
+          </Link>
+          <Link href="https://github.com/SemiAnalysisAI/InferenceX">Star on GitHub →</Link>
+        </div>
+      </section>
+
+      {/* All 6 presets with full tags */}
+      <section>
+        {PRESETS.map((p) => (
+          <Link key={p.href} href={p.href}>
+            <Card>
+              <h2>{p.title}</h2>
+              <ArrowRight />
+              <p>{p.description}</p>
+              <div>
+                {p.tags.map((t) => (
+                  <span key={t}>{t}</span>
+                ))}
+              </div>
+            </Card>
+          </Link>
+        ))}
+      </section>
+
+      {/* Quote */}
+      <Card>
+        <Quote />
+        <p>“{FEATURED_QUOTE.text}”</p>
+        <p>— {FEATURED_QUOTE.name}, {FEATURED_QUOTE.title}</p>
+      </Card>
+
+      <nav>
+        <Link href="/quotes">Supporters →</Link>
+        <Link href="/blog">Articles →</Link>
+        <Link href="/reliability">GPU Reliability →</Link>
+        <Link href="https://newsletter.semianalysis.com">Newsletter →</Link>
+      </nav>
+    </main>
+ ); +}