from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image
# Build a TurboMind inference pipeline.
# 'MiniCPM-Llama3-V 2.5' can be replaced with a local model path.
# session_len=2048 is the context length (in tokens).
# tp=8 is the tensor-parallel degree (number of GPUs); it must be a
# power of two, e.g. 1, 2, 4, 8.
pipe = pipeline('MiniCPM-Llama3-V 2.5',
                backend_config=TurbomindEngineConfig(session_len=2048, tp=8))

# Image URLs or local paths to run the prompt against.
# BUG FIX: the original script listed these paths as bare expression
# statements — `image_urls` was never assigned, so the comprehension
# below raised NameError. Restored the list assignment.
image_urls = [
    "/root/ld/ld_project/MiniCPM-V/assets/minicpmv2-cases.png",
    "/root/ld/ld_project/MiniCPM-V/assets/llavabench_compare_phi3.png",
    "/root/ld/ld_project/MiniCPM-V/assets/MiniCPM-Llama3-V-2.5-peformance.png",
    "/root/ld/ld_project/MiniCPM-V/assets/zhihu.webp",
    "/root/ld/ld_project/MiniCPM-V/assets/thunlp.png",
]

# Pair each image with the same text prompt; load_image accepts either a
# URL or a local file path.
prompts = [('describe this image', load_image(img_url)) for img_url in image_urls]
# Batched inference: one response object per (prompt, image) pair.
response = pipe(prompts)
print([i.text for i in response])