Calling DeepSeek from LangChain

Installation

shell
pip install langchain openai langchain-community
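
Since the DeepSeek API is OpenAI-compatible, the packages installed above are already enough to talk to it through the stock ChatOpenAI wrapper, with no custom class at all. The snippet below is a minimal sketch: the base URL, model name, and the DEEP_SEEK_API_KEY environment variable are assumptions mirroring what the test code further down uses, so adjust them for your account.

python
import os
from langchain_community.chat_models import ChatOpenAI

# Point the OpenAI-compatible client at DeepSeek's endpoint
# (base URL and model name assumed from DeepSeek's public docs).
llm = ChatOpenAI(
    model_name="deepseek-chat",
    openai_api_key=os.getenv("DEEP_SEEK_API_KEY"),
    openai_api_base="https://api.deepseek.com/v1",
    temperature=0.7,
)

print(llm.invoke("Who are you?").content)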

Test

The example below wraps DeepSeek's chat completions endpoint in a custom LangChain LLM subclass, then calls it once to verify the connection.

python
from langchain_core.language_models.llms import LLM
from typing import Optional, List, Mapping, Any
import requests, os

class DeepSeekLLM(LLM):
    api_key: str  # DeepSeek API key
    api_url: str  # chat completions endpoint URL

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # Call DeepSeek's chat completions API
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        
        data = {
            "model": "deepseek-chat",  # 指定模型
            "messages": [
                {"role": "user", "content": prompt}
            ],
            "temperature": 0.7,
            "max_tokens": 1000
        }
        
        try:
            response = requests.post(self.api_url, headers=headers, json=data, timeout=60)
            response.raise_for_status()
            result = response.json()

            # The generated text sits in choices[0].message.content
            return result["choices"][0]["message"]["content"]
            
        except requests.exceptions.HTTPError as e:
            print(f"HTTP Error: {e.response.status_code} - {e.response.text}")
            raise
        except Exception as e:
            print(f"Error: {str(e)}")
            raise

    @property
    def _llm_type(self) -> str:
        return "deepseek"

try:
    deepseek_llm = DeepSeekLLM(
        api_key=os.getenv("DEEP_SEEK_API_KEY"), 
        api_url="https://api.deepseek.com/v1/chat/completions"
    )
    response = deepseek_llm.invoke("Who are you?")
    print("Result:", response)
except Exception as e:
    print(f"Failed to get response: {str(e)}")