Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions source/_data/SymbioticLab.bib
Original file line number Diff line number Diff line change
Expand Up @@ -2141,6 +2141,20 @@ @Article{tetriserve:arxiv25
}
}

@InProceedings{tetriserve:asplos26,
  title            = {{TetriServe}: Efficiently Serving Mixed {DiT} Workloads},
  author           = {Runyu Lu and Shiqi He and Wenxuan Tan and Shenggui Li and Ruofan Wu and Jeff J. Ma and Ang Chen and Mosharaf Chowdhury},
  booktitle        = {ASPLOS},
  year             = {2026},
  month            = {March},
  publist_confkey  = {ASPLOS'26},
  publist_link     = {paper || tetriserve-asplos26.pdf},
  publist_topic    = {Systems + AI},
  publist_abstract = {
Diffusion Transformer (DiT) models excel at generating high-quality images through iterative denoising steps, but serving them under strict Service Level Objectives (SLOs) is challenging due to their high computational cost, particularly at larger resolutions. Existing serving systems use fixed-degree sequence parallelism, which is inefficient for heterogeneous workloads with mixed resolutions and deadlines, leading to poor GPU utilization and low SLO attainment. In this paper, we propose step-level sequence parallelism to dynamically adjust the degree of parallelism of individual requests according to their deadlines. We present TetriServe, a DiT serving system that implements this strategy for highly efficient image generation. Specifically, TetriServe introduces a novel round-based scheduling mechanism that improves SLO attainment by (1) discretizing time into fixed rounds to make deadline-aware scheduling tractable, (2) adapting parallelism at the step level and minimizing GPU hour consumption, and (3) jointly packing requests to minimize late completions. Extensive evaluation on state-of-the-art DiT models shows that TetriServe achieves up to 32% higher SLO attainment compared to existing solutions without degrading image quality.
}
}

@InProceedings{mlenergy-benchmark:neuripsdb25,
title = {The {ML.ENERGY} Benchmark: Toward Automated Inference Energy Measurement and Optimization},
author = {Jae-Won Chung and Jeff J. Ma and Ruofan Wu and Jiachen Liu and Oh Jun Kweon and Yuxuan Xia and Zhiyu Wu and Mosharaf Chowdhury},
Expand Down
Binary file not shown.
4 changes: 4 additions & 0 deletions source/publications/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,10 @@ venues:
ASPLOS:
category: Conferences
occurrences:
- key: ASPLOS'26
name: The 31st ACM International Conference on Architectural Support for Programming Languages and Operating Systems
date: 2026-03-22
url: https://asplos-conference.org/2026/
- key: ASPLOS'23
name: The 28th ACM International Conference on Architectural Support for Programming Languages and Operating Systems
date: 2023-03-25
Expand Down