@article{Walkowiak_Bartosz_Implementation_2024,
  author    = {Walkowiak, Bartosz and Walkowiak, Tomasz},
  title     = {Implementation of language models within an infrastructure designed for {Natural Language Processing}},
  journal   = {International Journal of Electronics and Telecommunications},
  year      = {2024},
  volume    = {70},
  number    = {1},
  pages     = {153--159},
  publisher = {Polish Academy of Sciences Committee of Electronics and Telecommunications},
  doi       = {10.24425/ijet.2024.149525},
  url       = {http://www.czasopisma.pan.pl/Content/130704/18_4466_Walkowiak_L_sk.pdf},
  keywords  = {language model deployment, quantization, Llama-2, E5 model, ONNX, llama.cpp, CLARIN-PL},
  abstract  = {This paper explores cost-effective alternatives for resource-constrained environments in the context of language models by investigating methods such as quantization and CPU-based model implementations. The study addresses the computational efficiency of language models during inference and the development of infrastructure for text document processing. The paper discusses related technologies, the CLARIN-PL infrastructure architecture, and implementations of small and large language models. The emphasis is on model formats, data precision, and runtime environments (GPU and CPU). It identifies optimal solutions through extensive experimentation. In addition, the paper advocates for a more comprehensive performance evaluation approach. Instead of reporting only average token throughput, it suggests considering the curve's shape, which can vary from constant to monotonically increasing or decreasing functions. Evaluating token throughput at various curve points, especially for different output token counts, provides a more informative perspective.},
}