GitHub source code walkthrough: https://github.com/ArtificialZeng/Baichuan2-Explained
import json
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig


st.set_page_config(page_title="Baichuan 2")
st.title("Baichuan 2")


@st.cache_resource
def init_model():
    # Load the chat model once per Streamlit session; fp16 weights are placed
    # across the available devices by device_map="auto".
    model = AutoModelForCausalLM.from_pretrained(
        "baichuan-inc/Baichuan2-13B-Chat",
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True
    )
    # Use the generation settings shipped with the checkpoint.
    model.generation_config = GenerationConfig.from_pretrained(
        "baichuan-inc/Baichuan2-13B-Chat"
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "baichuan-inc/Baichuan2-13B-Chat",
        use_fast=False,
        trust_remote_code=True
    )
    return model, tokenizer


def clear_chat_history():
    # Drop the stored conversation so the next rerun starts fresh.
    del st.session_state.messages


def init_chat_history():
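The excerpt breaks off at init_chat_history. A minimal sketch of how a demo like this typically continues is shown below; it follows the standard Streamlit st.session_state chat pattern and assumes the chat(tokenizer, messages, stream=True) method that Baichuan2 checkpoints expose through trust_remote_code. The bodies are illustrative, not a verbatim copy of the repository code.

def init_chat_history():
    # Replay any prior turns stored in the session; create the list on first run.
    if "messages" in st.session_state:
        for message in st.session_state.messages:
            avatar = "🧑‍💻" if message["role"] == "user" else "🤖"
            with st.chat_message(message["role"], avatar=avatar):
                st.markdown(message["content"])
    else:
        st.session_state.messages = []
    return st.session_state.messages


def main():
    model, tokenizer = init_model()
    messages = init_chat_history()

    if prompt := st.chat_input("Enter a message"):
        with st.chat_message("user", avatar="🧑‍💻"):
            st.markdown(prompt)
        messages.append({"role": "user", "content": prompt})

        with st.chat_message("assistant", avatar="🤖"):
            placeholder = st.empty()
            # Stream partial generations into the placeholder as they arrive.
            for response in model.chat(tokenizer, messages, stream=True):
                placeholder.markdown(response)
        messages.append({"role": "assistant", "content": response})
        print(json.dumps(messages, ensure_ascii=False), flush=True)

        st.button("Clear history", on_click=clear_chat_history)


if __name__ == "__main__":
    main()

Because the whole script reruns on every user interaction, @st.cache_resource keeps the 13B model in memory across reruns, while the conversation itself lives in st.session_state.messages.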