{"type":"rich","version":"1.0","title":"Fetching llama3.2 into local ollama installation, and testing initial chat (with a hallucinated answer)","width":1901,"html":"<a href=\"https://asciinema.org/a/680936\" target=\"_blank\"><img alt=\"Fetching llama3.2 into local ollama installation, and testing initial chat (with a hallucinated answer)\" src=\"https://asciinema.org/a/680936.png\" width=\"1901\"></a>","height":952,"author_name":"jmfernandez","author_url":"https://asciinema.org/~jmfernandez","provider_name":"asciinema","provider_url":"https://asciinema.org/","thumbnail_height":952,"thumbnail_url":"https://asciinema.org/a/680936.png","thumbnail_width":1901}