from langchain.llms import CTransformers  # to load the GGML model (runs via llama.cpp)
from langchain.chains import LLMChain  # to run LLM queries
from langchain import PromptTemplate
# TODO: change to GGUF instead of GGML...
# atm, the llama-2-13b-chat model is top 15/top 30 ish so use for now
import os, io, requests
import streamlit as st  # framework to create our web app
from docx import Document  # to create the Word doc file
from docx.shared import Inches
from PIL import Image  # to open images
from fetchImage import fetchImage  # our fetchImage function
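# NOTE: fetchImage comes from a local module that isn't shown here. Based on how it is
# used below (st.image(imageUrl) and requests.get(imageUrl)), it is assumed to take an
# image topic string and return the URL of a matching image.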
# load model
def load_llm(max_tokens, prompt_template):
    # load the local llama-2-13b-chat model
    llm = CTransformers(
        model="codeup-llama-2-13b-chat-hf.ggmlv3.q2_K.bin",
        model_type="llama",
        max_new_tokens=max_tokens,
        temperature=0.5,
    )
    # wrap the model in a chain that fills the prompt template with the user's topic
    llm_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate.from_template(prompt_template),
    )
    return llm_chain
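# The TODO above mentions switching from GGML to GGUF. The sketch below shows one way
# that might look with CTransformers (recent ctransformers builds can load GGUF files);
# the .gguf file name is a placeholder, not a file known to exist in this repo.
def load_llm_gguf(max_tokens, prompt_template):
    llm = CTransformers(
        model="codeup-llama-2-13b-chat-hf.Q4_K_M.gguf",  # placeholder GGUF file name
        model_type="llama",
        max_new_tokens=max_tokens,
        temperature=0.5,
    )
    return LLMChain(llm=llm, prompt=PromptTemplate.from_template(prompt_template))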
def createWordDoc(userInput, paragraph, imageInput):
    doc = Document()
    doc.add_heading(userInput, level=1)  # user provides the doc heading
    doc.add_paragraph(paragraph)
    doc.add_heading('Image', level=1)  # layout
    # write the PIL image into an in-memory PNG buffer so python-docx can embed it
    imageStream = io.BytesIO()
    imageInput.save(imageStream, format='PNG')
    imageStream.seek(0)  # rewind the buffer before embedding
    doc.add_picture(imageStream, width=Inches(4))
    return doc
st.set_page_config(layout='wide')
def main():
    st.title("Article Generator 📰")
    userInput = st.text_input("Please enter an article topic.")
    imageResponse = st.text_input("Please enter an image topic.")

    if userInput and imageResponse:
        col1, col2, col3 = st.columns([1, 2, 1])  # layout columns

        with col1:
            st.subheader("Content generated by Llama 2")
            st.write("Your article topic: ", userInput)
            st.write("Your image topic: ", imageResponse)
            prompt_template = """Your task is to generate articles for a given topic. Write an 800-word article on {topic}. Stay on the user's topic. Maintain a professional but creative tone."""
            llm_call = load_llm(max_tokens=750, prompt_template=prompt_template)
            result = llm_call(userInput)
            if result:
                st.info("Article has been generated!")
                st.write(result['text'])
            else:
                st.error("Sorry, we couldn't generate an article for the given topic.")

        with col2:
            st.subheader("Your article's image")
            imageUrl = fetchImage(imageResponse)
            st.image(imageUrl)

        with col3:
            st.subheader("Download article")
            imageResponse = requests.get(imageUrl)  # get the image from the internet
            image = Image.open(io.BytesIO(imageResponse.content))
            doc = createWordDoc(userInput, result['text'], image)
            # save the Word doc to an in-memory buffer
            docBuffer = io.BytesIO()
            doc.save(docBuffer)
            docBuffer.seek(0)
            # download button
            st.download_button(
                label="Download article",
                data=docBuffer,
                file_name=f"{userInput}.docx",
                mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            )


if __name__ == "__main__":
    main()
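# To try the app locally, a Streamlit script like this is typically started with:
#   streamlit run app.py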