-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
194 lines (155 loc) · 6.05 KB
/
app.py
File metadata and controls
194 lines (155 loc) · 6.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
# One function to configure the ticker and fetch link
def get_latest_link(ticker: str) -> str:
    """Return the URL of the most recent 10-K document for *ticker*.

    Queries SEC EDGAR's browse interface for the company's 10-K filings,
    follows the newest filing that exposes interactive data, and extracts
    the direct document link (stripping the inline-XBRL viewer prefix).

    Raises:
        requests.HTTPError: if either EDGAR request fails.
        ValueError: if no usable 10-K filing is found for the ticker.
    """
    import re
    import requests
    from bs4 import BeautifulSoup as bs

    base_url = r"https://www.sec.gov/cgi-bin/browse-edgar"
    # SEC requires a descriptive User-Agent identifying the requester.
    headers = {
        "User-Agent": "Apex User Agent",
        "From": "apexfund@gmail.com",
    }
    param_dict = {
        "action": "getcompany",
        "CIK": ticker,
        "type": "10-k",
        "owner": "exclude",
        "output": "XML",
        "count": "10",
    }
    # Fetch the filing index; fail loudly on HTTP errors instead of
    # silently parsing an error page.
    response = requests.get(
        url=base_url, params=param_dict, headers=headers, timeout=30
    )
    response.raise_for_status()
    soup = bs(response.content, "lxml")

    # Base url used for building absolute links.
    base_url_sec = r"https://www.sec.gov"
    # The third <table> on the page holds the filing rows.
    doc_table = bs(str(soup("table")[2]), "html.parser")

    # Keep only the rows that mention "10-K".
    rows_html = "".join(
        str(item.parent.parent.prettify())
        for item in doc_table.find_all(string=re.compile("10-K"))
    )
    # NOTE(review): the original also dumped these rows to a relative
    # "row.txt" for debugging — removed, since Lambda's filesystem is
    # read-only outside /tmp and the file was never read back.
    ten_k_rows = bs(rows_html, "html.parser")

    file_dict = {
        "file_type": param_dict["type"],
        "ticker": param_dict["CIK"],
        "links": {},
    }
    for row in ten_k_rows.find_all("tr"):
        # Filing date is the 4th cell; normalise YYYY-MM-DD -> YYYYMMDD.
        filing_date = str(row.find_all("td")[3].text).strip()
        date = filing_date.replace("-", "")
        file_dict["links"][date] = {}
        filing_doc_href = row.find("a", {"id": "documentsbutton"})
        if filing_doc_href is not None:
            file_dict["links"][date]["documents"] = (
                base_url_sec + filing_doc_href["href"]
            )
        filing_int_href = row.find("a", {"id": "interactiveDataBtn"})
        if filing_int_href is not None:
            file_dict["links"][date]["interactive_data"] = (
                base_url_sec + filing_int_href["href"]
            )

    if not file_dict["links"]:
        raise ValueError(f"No 10-K filings found for ticker {ticker!r}")

    # Pick the first filing that actually has interactive data, rather
    # than assuming the very first entry does (the original KeyError'd
    # on filings without an interactiveDataBtn link).
    interactive_url = None
    for links in file_dict["links"].values():
        if "interactive_data" in links:
            interactive_url = links["interactive_data"]
            break
    if interactive_url is None:
        raise ValueError(
            f"No interactive 10-K data found for ticker {ticker!r}"
        )

    ten_k = requests.get(url=interactive_url, headers=headers, timeout=30)
    ten_k.raise_for_status()
    ten_k_soup = bs(ten_k.content, "lxml")
    ten_k_doc_href = ten_k_soup.find_all(string=re.compile("10-K"))[0].parent.parent[
        "href"
    ]
    fin = base_url_sec + ten_k_doc_href
    # Drop the inline-XBRL viewer prefix (chars 20-27, i.e. "/ix?doc=")
    # so the raw document URL is returned.
    return fin[:20] + fin[28:]
# One function to parse html and store in DB
def fetch_parse_store(url: str) -> None:
    """Download the page at *url* and cache its prettified HTML.

    The HTML is written to ``/tmp/content.html`` (the only writable path
    in AWS Lambda) and is later consumed by ``fetch_completions``.

    Raises:
        requests.HTTPError: if the download fails — failing loudly here
        prevents a stale cached file from being silently reused.
    """
    import requests
    from bs4 import BeautifulSoup as bs

    #### INDEXING ####
    # SEC requires a descriptive User-Agent identifying the requester.
    headers = {
        "User-Agent": "Apex User Agent",
        "From": "apexfundquant@gmail.com",
    }
    # Send a GET request to the URL; raise instead of just printing on
    # failure so the caller never proceeds with outdated content.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    # Use BeautifulSoup to parse the HTML content.
    soup = bs(response.text, "html.parser")
    # Write the parsed HTML to a local file under /tmp.
    with open("/tmp/content.html", "w", encoding="utf-8") as file:
        file.write(str(soup.prettify()))
    print("HTML content saved successfully.")
# One function for actual operation
def fetch_completions(prompt: str) -> str:
    """Answer *prompt* with a RAG chain over the cached 10-K HTML.

    Loads ``/tmp/content.html`` (written by ``fetch_parse_store``),
    splits it into overlapping chunks, embeds them into an in-memory
    Chroma store, and runs a retrieval-augmented generation chain with
    GPT-4 Turbo.

    Args:
        prompt: The user's question about the filing.

    Returns:
        The model's answer as a plain string.
    """
    import os
    from langchain import hub
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain_community.document_loaders.html_bs import BSHTMLLoader
    from langchain_community.vectorstores.chroma import Chroma
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.runnables import RunnablePassthrough
    from langchain_openai import ChatOpenAI, OpenAIEmbeddings

    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
    # Only forward the key when it is set: assigning None into
    # os.environ raises TypeError.
    langchain_key = os.getenv("LANGCHAIN_API_KEY")
    if langchain_key is not None:
        os.environ["LANGCHAIN_API_KEY"] = langchain_key
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

    #### INDEXING ####
    loader = BSHTMLLoader("/tmp/content.html")
    data = loader.load()
    # Split
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_documents(data)
    # Embed
    vectorstore = Chroma.from_documents(
        documents=splits,
        embedding=OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY),
    )
    retriever = vectorstore.as_retriever()

    #### RETRIEVAL and GENERATION ####
    # BUG FIX: the original rebound `prompt` to the hub template here,
    # so the chain was invoked with the template object instead of the
    # user's question. Keep the template under a distinct name.
    rag_prompt = hub.pull("rlm/rag-prompt")
    # LLM
    llm = ChatOpenAI(model_name="gpt-4-turbo", temperature=0)

    # Post-processing: join retrieved chunks into one context string.
    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    # Chain
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | rag_prompt
        | llm
        | StrOutputParser()
    )
    # Question: invoke with the caller's original prompt.
    return rag_chain.invoke(prompt)
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Expects ``event`` to carry a ``ticker`` and a ``prompt``; resolves
    the ticker's latest 10-K filing, caches its HTML, and returns the
    RAG-generated answer to the prompt.
    """
    ticker, prompt = event["ticker"], event["prompt"]
    filing_url = get_latest_link(ticker)
    fetch_parse_store(filing_url)
    return fetch_completions(prompt)