The results show that MovingColor is the only method that achieves both spatial and temporal consistency while introducing minimal color deviation in non-edge areas.
The dataset release is pending copyright review.
@inproceedings{dong2024movingcolor,
author = {Dong, Yi and Wang, Yuxi and Fang, Zheng and Ouyang, Wenqi and Lin, Xianhui and Shen, Zhiqi and Ren, Peiran and Xie, Xuansong and Huang, Qingming},
title = {MovingColor: Seamless Fusion of Fine-grained Video Color Enhancement},
year = {2024},
isbn = {9798400706868},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3664647.3681130},
doi = {10.1145/3664647.3681130},
booktitle = {Proceedings of the 32nd ACM International Conference on Multimedia},
pages = {7454–7463},
numpages = {10},
keywords = {color fusion, video color enhancement, video editing},
location = {Melbourne VIC, Australia},
series = {MM '24}
}