@article{10.1145/3687768,
  author     = {Zhou, Yuxiao and Chai, Menglei and Wang, Daoye and Winberg, Sebastian and Wood, Erroll and Sarkar, Kripasindhu and Gross, Markus and Beeler, Thabo},
  title      = {{GroomCap}: High-Fidelity Prior-Free Hair Capture},
  journal    = {ACM Trans. Graph.},
  year       = {2024},
  issue_date = {December 2024},
  volume     = {43},
  number     = {6},
  month      = nov,
  articleno  = {254},
  numpages   = {15},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  issn       = {0730-0301},
  doi        = {10.1145/3687768},
  abstract   = {Despite recent advances in multi-view hair reconstruction, achieving strand-level precision remains a significant challenge due to inherent limitations in existing capture pipelines. We introduce GroomCap, a novel multi-view hair capture method that reconstructs faithful and high-fidelity hair geometry without relying on external data priors. To address the limitations of conventional reconstruction algorithms, we propose a neural implicit representation for hair volume that encodes high-resolution 3D orientation and occupancy from input views. This implicit hair volume is trained with a new volumetric 3D orientation rendering algorithm, coupled with 2D orientation distribution supervision, to effectively prevent the loss of structural information caused by undesired orientation blending. We further propose a Gaussian-based hair optimization strategy to refine the traced hair strands with a novel chained Gaussian representation, utilizing direct photometric supervision from images. Our results demonstrate that GroomCap is able to capture high-quality hair geometries that are not only more precise and detailed than existing methods but also versatile enough for a range of applications.},
  keywords   = {strand-level hair modeling, multi-view reconstruction},
}