## Core Components
### Chains
```python
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.chains import SimpleSequentialChain
template = """You are a helpful assistant.
Question: {question}
Answer:"""
prompt = PromptTemplate(
    input_variables=["question"],
    template=template
)
llm = OpenAI(temperature=0.7)
chain = LLMChain(llm=llm, prompt=prompt)
result = chain.run("What is machine learning?")
```
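`SimpleSequentialChain`, imported above, feeds the single output of one chain into the single input of the next. A minimal sketch reusing `llm` (the prompt texts are illustrative):
```python
# Compose two LLMChains; SimpleSequentialChain passes the first chain's
# output string as the second chain's single input variable
title_prompt = PromptTemplate(
    input_variables=["topic"],
    template="Suggest one blog post title about {topic}."
)
outline_prompt = PromptTemplate(
    input_variables=["title"],
    template="Write a three-point outline for a post titled: {title}"
)
pipeline = SimpleSequentialChain(
    chains=[
        LLMChain(llm=llm, prompt=title_prompt),
        LLMChain(llm=llm, prompt=outline_prompt),
    ],
    verbose=True
)
outline = pipeline.run("vector databases")
```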
### RAG Pipeline
```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# TextLoader takes a single file path; use DirectoryLoader for a folder
loader = DirectoryLoader("docs/", glob="**/*.txt", loader_cls=TextLoader)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200
)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vectordb = Chroma.from_documents(
    texts,
    embeddings,
    persist_directory="./chroma_db"
)
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,  # the OpenAI LLM defined in the Chains example
    chain_type="stuff",  # stuff all retrieved chunks into a single prompt
    retriever=vectordb.as_retriever(search_kwargs={"k": 4}),
    return_source_documents=True
)
result = qa_chain({"query": "What are the main topics?"})
```
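Because `persist_directory` is set, the index can be reopened later instead of re-embedding the corpus. A sketch, assuming the same embedding model; older Chroma wrappers need an explicit `persist()` call, while newer ones save automatically:
```python
vectordb.persist()  # flush to disk (automatic in newer Chroma versions)

# In a later session, reload the index; the embedding model must match
# the one used at indexing time
reloaded = Chroma(
    persist_directory="./chroma_db",
    embedding_function=OpenAIEmbeddings()
)
hits = reloaded.similarity_search("main topics", k=4)
```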
### Agents with Tools
```python
from langchain.agents import initialize_agent, Tool, AgentType
from langchain.tools import DuckDuckGoSearchRun
from langchain.utilities import WikipediaAPIWrapper
tools = [
    Tool(
        name="Search",
        func=DuckDuckGoSearchRun().run,
        description="Useful for searching current information"
    ),
    Tool(
        name="Wikipedia",
        func=WikipediaAPIWrapper().run,
        description="Useful for general knowledge"
    ),
    Tool(
        name="Calculator",
        func=lambda x: eval(x),  # unsafe placeholder; see the LLMMathChain sketch below
        description="Useful for mathematical calculations"
    )
]
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True
)
agent.run("What is the population of Tokyo divided by the population of New York?")
```
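A safer replacement for the `eval` placeholder is `LLMMathChain`, which has the model emit a math expression and evaluates it with `numexpr` (an extra dependency) rather than Python's `eval`:
```python
from langchain.chains import LLMMathChain

math_chain = LLMMathChain.from_llm(llm)
calculator = Tool(
    name="Calculator",
    func=math_chain.run,  # evaluates via numexpr, not eval
    description="Useful for mathematical calculations"
)
```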
### Memory Systems
```python
from langchain.memory import (
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
    VectorStoreRetrieverMemory
)
# ConversationChain's built-in prompt expects the memory key "history";
# memory_key="chat_history" with return_messages=True is for custom chat
# prompts that include a MessagesPlaceholder
memory = ConversationBufferMemory()
from langchain.chains import ConversationChain
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)
conversation.predict(input="Hi, I'm Alice")
conversation.predict(input="What's my name?")
```
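The other two imports bound memory growth: `ConversationBufferWindowMemory` keeps only the last `k` exchanges, and `VectorStoreRetrieverMemory` fetches relevant past turns from a vector store. A sketch of the window variant:
```python
# Keep only the last 5 exchanges so the prompt stays a bounded size
window_memory = ConversationBufferWindowMemory(k=5)
bounded = ConversationChain(llm=llm, memory=window_memory)
bounded.predict(input="Hi, I'm Alice")
```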
## LCEL (LangChain Expression Language)
```python
from langchain.schema.runnable import RunnableParallel
from langchain.prompts import ChatPromptTemplate
from operator import itemgetter
prompt = ChatPromptTemplate.from_template("""
Answer based on context:
{context}
Question: {question}
""")
retriever = vectordb.as_retriever()  # the Chroma index from the RAG Pipeline section
chain = (
    # the dict is shorthand for RunnableParallel: each value is computed
    # from the same input dict, and itemgetter pulls out the "question" key
    {
        "context": itemgetter("question") | retriever,
        "question": itemgetter("question")
    }
    | prompt
    | llm
)
chain.invoke({"question": "What is this about?"})
```
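`RunnableParallel` can also be spelled out explicitly to fan one input into several sub-chains; a sketch that adds a hypothetical summary branch next to the answer:
```python
summary_prompt = ChatPromptTemplate.from_template(
    "Summarize this context in one sentence:\n{context}"
)
summary_chain = (
    {"context": itemgetter("question") | retriever}
    | summary_prompt
    | llm
)
# Both branches receive the same input dict and run in parallel
both = RunnableParallel(answer=chain, summary=summary_chain)
both.invoke({"question": "What is this about?"})
# -> {"answer": "...", "summary": "..."}
```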
## Best Practices
- Prefer LCEL for new chains; the legacy `Chain` classes are being phased out
- Handle errors explicitly (e.g., `handle_parsing_errors=True` on agents, retries around API calls)
- Cache embeddings (e.g., with `CacheBackedEmbeddings`) so unchanged documents are not re-embedded
- Monitor token usage and cost, e.g., with `get_openai_callback` (sketched below)
- Use the async API (`ainvoke`, `abatch`) for concurrent calls (also sketched below)
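A sketch combining the last two practices, reusing the LCEL `chain` from above (the questions are illustrative):
```python
import asyncio
from langchain.callbacks import get_openai_callback

# get_openai_callback tallies tokens and estimated cost for every
# OpenAI call made inside the block
with get_openai_callback() as cb:
    chain.invoke({"question": "What is this about?"})
    print(f"{cb.total_tokens} tokens, ${cb.total_cost:.4f}")

# ainvoke plus asyncio.gather sends the requests concurrently
async def answer_all(questions):
    return await asyncio.gather(
        *(chain.ainvoke({"question": q}) for q in questions)
    )

answers = asyncio.run(answer_all(["What is RAG?", "What is LCEL?"]))
```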