manshu2025 committed
Commit 8574434 · 0 Parent(s)

initial commit
.gitattributes ADDED
@@ -0,0 +1,1 @@
+ *.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
README.md ADDED
@@ -0,0 +1,40 @@
+ ---
+ pretty_name: Book Recommender Dataset (Emotions & Categories)
+ task_categories:
+ - tabular-classification
+ - retrieval
+ - recommendation
+ tags:
+ - books
+ - embeddings
+ - emotions
+ - categories
+ license: mit
+ size_categories:
+ - 10K<n<100K
+ ---
+
+ # Book Recommender Dataset
+
+ CSV exports from my Book Recommender pipeline. Includes cleaned metadata, category labels, emotion tags, and a tagged description file.
+
+ ## Files
+ - `books_cleaned.csv`: Core cleaned book metadata.
+ - `books_with_categories.csv`: Adds multi-label `categories` column.
+ - `books_with_emotions.csv`: Adds `emotion_*` columns (one-hot or scores).
+ - `tagged_description.txt`: ISBN-tagged descriptions, one per line.
+
+ ## Column Schema (example)
+ - `book_id` (str)
+ - `title` (str)
+ - `author` (str)
+ - `description` (str)
+ - `categories` (list[str] or pipe-separated str)
+ - `emotion_joy` (float), `emotion_sadness` (float), ...
+
+ ## How to load
+ ```python
+ from datasets import load_dataset
+ ds = load_dataset("<your-username>/book-recommender-dataset", data_files="data/books_with_emotions.csv")
+ ds["train"][0]
+ ```
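+
+ Alternatively, a minimal pandas sketch (assuming the CSVs have been downloaded locally; the emotion column name follows the example schema above and is an assumption):
+
+ ```python
+ import pandas as pd
+
+ books = pd.read_csv("data/books_with_emotions.csv")
+ # e.g. surface the most joyful books (column name per the example schema; an assumption)
+ print(books.sort_values("emotion_joy", ascending=False).head())
+ ```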
data/books_cleaned.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ad2bbd712a8c59d3c10409a2c50f34beb32fb2e4745ad9165bf9511318fdaa0
+ size 6387074
data/books_with_categories.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a50d3e19adf550a8364855172707fd46b1f5c8df60ae5366ab073f4a55b007ec
+ size 6439582
data/books_with_emotions.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad3818fcb99a14c9bb11de840069b9bfae0fd7b2efee091acc7c9532e027619d
+ size 7149129
data/tagged_description.txt ADDED
The diff for this file is too large to render. See raw diff
 
requirments.txt ADDED
@@ -0,0 +1,18 @@
+ numpy
+ pandas
+ matplotlib
+ seaborn
+ tqdm
+ kagglehub
+ python-dotenv
+ langchain-community
+ langchain-openai
+ langchain-chroma
+ langchain-text-splitters
+ transformers
+ torch
+ chromadb
+ streamlit
+ gradio
+ ipykernel
+ ipywidgets
src/dataset_cleaning.py ADDED
@@ -0,0 +1,430 @@
+ # %%
+ import numpy as np
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+
+ # %%
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ # %%
+ import kagglehub
+
+ # Download the latest version of the dataset
+ path = kagglehub.dataset_download("dylanjcastillo/7k-books-with-metadata")
+ print("Path to dataset files:", path)
+
+ # %%
+ # Quick connectivity check for Kaggle downloads
+ import socket
+ print(socket.gethostbyname("www.kaggle.com"))
+
+ # %%
+ books = pd.read_csv(f"{path}/books.csv")
+ books
+
+ # %%
+ books.isnull().sum()
+
+ # %%
+ def dataset_summary(df):
+     """Per-column summary: missing count, non-null count, distinct values, and most frequent value."""
+     summary = pd.DataFrame({
+         "column": df.columns,
+         "missing": df.isnull().sum().values,
+         "count": df.count().values,
+         "distinct": df.nunique().values
+     })
+
+     # Get top frequency and value for each column
+     top_freqs = []
+     top_values = []
+     for col in df.columns:
+         if df[col].nunique(dropna=False) > 0:
+             most_common = df[col].value_counts(dropna=False).idxmax()
+             freq = df[col].value_counts(dropna=False).max()
+         else:
+             most_common = None
+             freq = 0
+         top_values.append(most_common)
+         top_freqs.append(freq)
+
+     summary["top_value"] = top_values
+     summary["top_frequency"] = top_freqs
+     return summary
+
+ summary_df = dataset_summary(books)
+ print(summary_df)
+
+ # %%
+ books["missing_description"] = np.where(books["description"].isna(), 1, 0)  # 1 if the description is missing, else 0
+ books["age_of_book"] = 2025 - books["published_year"]  # age of the book in years
+
+ # %%
+ columns_of_interest = ["num_pages", "age_of_book", "missing_description", "average_rating"]
+ correlation_matrix = books[columns_of_interest].corr(method="spearman")
+
+ sns.set_theme(style="white")
+ plt.figure(figsize=(8, 6))
+ heatmap = sns.heatmap(correlation_matrix, annot=True, fmt=".2f", cmap="coolwarm",
+                       cbar_kws={"label": "Spearman correlation"})
+ heatmap.set_title("Correlation heatmap")
+ plt.show()
+
+ # %% [markdown]
+ # # Checking whether missing_description correlates with any other column. From the heatmap above, it clearly does not.
+
+ # %% [markdown]
+ # # How much would it cost us to drop the rows with missing values?
+
+ # %%
+ book_missing = books[
+     books["description"].notna() &
+     books["num_pages"].notna() &
+     books["average_rating"].notna() &
+     books["published_year"].notna()
+ ]
+
+ # %% [markdown]
+ # Each `notna()` call returns a Boolean Series (True where the value is present). Combining the four
+ # with `&` keeps only the books that have a description, page count, average rating, and published
+ # year; it is equivalent to the `~(...isna())` form but more readable.
+
+ # %%
+ book_missing
+
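+ # %%
+ # Quick check: how many rows does dropping the missing values actually cost?
+ # (Uses only the `books` and `book_missing` frames defined above.)
+ dropped = len(books) - len(book_missing)
+ print(f"Dropped {dropped} of {len(books)} rows ({dropped / len(books):.1%})")
+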
+ # %%
+ # Count the categories, then plot the 10 most frequent ones
+ category_counts = book_missing["categories"].value_counts().reset_index()
+ category_counts.columns = ["categories", "count"]
+ top_10_categories = category_counts.sort_values("count", ascending=False).head(10)
+
+ plt.figure(figsize=(10, 6))
+ sns.barplot(data=top_10_categories, x="count", y="categories", palette="viridis")
+ plt.title("Top 10 Book Categories by Count")
+ plt.xlabel("Count")
+ plt.ylabel("Category")
+ plt.tight_layout()
+ plt.show()
+
+ # %% [markdown]
+ # # As you can see, the classes are heavily imbalanced
+
+ # %% [markdown]
+ # ## We have a long-tailed distribution of categories
+
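+ # %%
+ # Quick check of the long tail: what share of books do the 10 most frequent categories cover?
+ # (Uses only top_10_categories and book_missing from above.)
+ top10_share = top_10_categories["count"].sum() / len(book_missing)
+ print(f"Top 10 categories cover {top10_share:.1%} of the books")
+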
+ # %%
+ # Split each description on whitespace and count the words
+ book_missing["words_in_description"] = book_missing["description"].str.split().str.len()
+ book_missing
+
+ # %%
+ # Plot the distribution of description word counts
+ plt.figure(figsize=(10, 6))
+ sns.histplot(data=book_missing, x="words_in_description", bins=100, kde=True, color="skyblue")
+ plt.title("Distribution of Word Counts in Book Descriptions")
+ plt.xlabel("Number of Words in Description")
+ plt.ylabel("Frequency")
+ plt.tight_layout()
+ plt.show()
+
+ # %%
+ # Zoom in on descriptions with <= 500 words, with ticks every 30 units
+ filtered = book_missing[book_missing["words_in_description"] <= 500]
+
+ plt.figure(figsize=(12, 6))
+ sns.histplot(data=filtered, x="words_in_description", bins=50, kde=True, color="skyblue")
+ plt.xlim(0, 500)
+ plt.xticks(ticks=list(range(0, 510, 30)))
+ plt.title("Distribution of Word Counts in Book Descriptions")
+ plt.xlabel("Number of Words in Description")
+ plt.ylabel("Frequency")
+ plt.tight_layout()
+ plt.show()
+
+ # %%
+ # Inspect the very short descriptions (1-4 words)
+ book_missing.loc[book_missing["words_in_description"].between(1, 4), "description"]
+
+ # %% [markdown]
+ # `.between(1, 4)` returns a Boolean Series that is True where the word count is between 1 and 4
+ # (inclusive); `.loc[rows, "description"]` then returns the description column for just those rows.
+
+ # %%
+ book_missing.loc[book_missing["words_in_description"].between(5, 14), "description"]
+
+ # %%
+ book_missing.loc[book_missing["words_in_description"].between(15, 24), "description"]
+
+ # %%
+ book_missing.loc[book_missing["words_in_description"].between(25, 34), "description"]
+
+ # %%
+ # Descriptions with 25+ words look substantive enough to keep
+ book_missing_25_words = book_missing[book_missing["words_in_description"] >= 25]
+ book_missing_25_words
+
+ # %%
+ book_missing_25_words["title_and_subtitle"] = (
+     np.where(book_missing_25_words["subtitle"].isna(),
+              book_missing_25_words["title"],
+              book_missing_25_words[["title", "subtitle"]].astype(str).agg(": ".join, axis=1))
+ )
+
+ # %% [markdown]
+ # This creates a `title_and_subtitle` column. `np.where(condition, if_true, if_false)` is a
+ # vectorized if-else: where `subtitle` is missing (`.isna()` returns True) it keeps just the title;
+ # otherwise `.astype(str).agg(": ".join, axis=1)` converts both columns to strings and joins them
+ # row-wise, so "My Book" and "A Guide" become "My Book: A Guide".
+
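+ # %%
+ # Tiny illustration of the same np.where pattern on a toy frame (the toy data is illustrative only)
+ toy = pd.DataFrame({"title": ["My Book", "Solo"], "subtitle": ["A Guide", np.nan]})
+ np.where(toy["subtitle"].isna(), toy["title"],
+          toy[["title", "subtitle"]].astype(str).agg(": ".join, axis=1))
+ # -> array(['My Book: A Guide', 'Solo'], dtype=object)
+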
+ # %%
+ book_missing_25_words
+
+ # %%
+ # Prefix each description with its ISBN so every row has a unique identifier
+ book_missing_25_words["tagged_description"] = book_missing_25_words[["isbn13", "description"]].astype(str).agg(" ".join, axis=1)
+
+ # %%
+ book_missing_25_words
+
+ # %% [markdown]
+ # # Save the cleaned data to a CSV
+
+ # %%
+ (
+     book_missing_25_words
+     .drop(["subtitle", "missing_description", "age_of_book", "words_in_description"], axis=1)
+     .to_csv("books_cleaned.csv", index=False)
+ )
src/semantic_analysis.py ADDED
@@ -0,0 +1,98 @@
+ # # Emotion classification: score each book description with an emotion classifier
+ # # (a DistilRoBERTa model already fine-tuned for emotion classification)
+
+ # %%
+ import pandas as pd
+
+ books = pd.read_csv("books_with_categories.csv")
+
+ # %%
+ from transformers import pipeline
+
+ classifier = pipeline("text-classification",
+                       model="j-hartmann/emotion-english-distilroberta-base",
+                       top_k=None,
+                       device=0)
+ classifier("I love this!")
+
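+ # %%
+ # Optional sketch: pick the device dynamically so the pipeline also runs on CPU-only machines
+ # (device=-1 means CPU for transformers pipelines; torch is already a dependency).
+ # import torch
+ # device = 0 if torch.cuda.is_available() else -1
+ # classifier = pipeline("text-classification",
+ #                       model="j-hartmann/emotion-english-distilroberta-base",
+ #                       top_k=None,
+ #                       device=device)
+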
+ # %%
+ books["description"][0]
+
+ # %%
+ classifier(books["description"][0])
+
+ # %%
+ # Classify the description sentence by sentence
+ classifier(books["description"][0].split("."))
+
+ # %%
+ sentences = books["description"][0].split(".")
+ predictions = classifier(sentences)
+ sentences[0]
+
+ # %%
+ predictions[0]
+
+ # %%
+ sentences[3]
+
+ # %%
+ predictions[3]
+
+ # %%
+ predictions
+
+ # %%
+ sorted(predictions[0], key=lambda x: x["label"])
+
+ # %%
+ import numpy as np
+
+ emotion_labels = ["anger", "disgust", "fear", "joy", "sadness", "surprise", "neutral"]
+ isbn = []
+ emotion_scores = {label: [] for label in emotion_labels}
+
+ def calculate_max_emotion_scores(predictions):
+     """For each emotion, take the maximum score across all sentences of a description."""
+     per_emotion_scores = {label: [] for label in emotion_labels}
+     for prediction in predictions:
+         # Index the scores by their label directly; sorting alphabetically and pairing with
+         # emotion_labels by position would misalign "neutral", "sadness", and "surprise".
+         for entry in prediction:
+             per_emotion_scores[entry["label"]].append(entry["score"])
+     return {label: np.max(scores) for label, scores in per_emotion_scores.items()}
+
+ # %%
+ # Dry run on the first 10 books
+ for i in range(10):
+     isbn.append(books["isbn13"][i])
+     sentences = books["description"][i].split(".")
+     predictions = classifier(sentences)
+     max_scores = calculate_max_emotion_scores(predictions)
+     for label in emotion_labels:
+         emotion_scores[label].append(max_scores[label])
+ emotion_scores
+
+ # %%
+ from tqdm import tqdm
+
+ # Reset the accumulators, then run over the full dataset
+ isbn = []
+ emotion_scores = {label: [] for label in emotion_labels}
+
+ for i in tqdm(range(len(books))):
+     isbn.append(books["isbn13"][i])
+     sentences = books["description"][i].split(".")
+     predictions = classifier(sentences)
+     max_scores = calculate_max_emotion_scores(predictions)
+     for label in emotion_labels:
+         emotion_scores[label].append(max_scores[label])
+
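+ # %%
+ # Optional sketch: transformers pipelines accept a batch_size argument, which can speed up the
+ # loop above on GPU (32 here is an illustrative assumption, not a tuned value).
+ # predictions = classifier(sentences, batch_size=32)
+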
+ # %%
+ emotions_df = pd.DataFrame(emotion_scores)
+ emotions_df["isbn13"] = isbn
+ emotions_df
+
+ # %%
+ books = pd.merge(books, emotions_df, on="isbn13")
+ books
+
+ # %%
+ books.to_csv("books_with_emotions.csv", index=False)
src/text_classification.py ADDED
@@ -0,0 +1,147 @@
+ # # Here we are going to do zero-shot text classification using an LLM
+
+ # %%
+ import pandas as pd
+
+ books = pd.read_csv("books_cleaned.csv")
+
+ # %%
+ books["categories"].value_counts().reset_index()
+
+ # %%
+ books["categories"].value_counts().reset_index().query("count > 50")
+
+ # %%
+ books[books["categories"] == "Juvenile Fiction"]
+
+ # %%
+ books[books["categories"] == "Juvenile Nonfiction"]
+
+ # %%
+ # Collapse the raw categories into a small, consistent label set
+ category_mapping = {'Fiction': "Fiction",
+                     'Juvenile Fiction': "Children's Fiction",
+                     'Biography & Autobiography': "Nonfiction",
+                     'History': "Nonfiction",
+                     'Literary Criticism': "Nonfiction",
+                     'Philosophy': "Nonfiction",
+                     'Religion': "Nonfiction",
+                     'Comics & Graphic Novels': "Fiction",
+                     'Drama': "Fiction",
+                     'Juvenile Nonfiction': "Children's Nonfiction",
+                     'Science': "Nonfiction",
+                     'Poetry': "Fiction"}
+
+ books["simple_categories"] = books["categories"].map(category_mapping)
+
+ # %%
+ books
+
+ # %%
+ from transformers import pipeline
+
+ fiction_categories = ["Fiction", "Nonfiction"]
+
+ pipe = pipeline("zero-shot-classification",
+                 model="facebook/bart-large-mnli",
+                 device=0)  # use your first CUDA GPU
+
+ # %%
+ sequence = books.loc[books["simple_categories"] == "Fiction", "description"].reset_index(drop=True)[0]
+
+ # %%
+ pipe(sequence, fiction_categories)
+
+ # %%
+ import numpy as np
+
+ # Run the pipeline once and reuse the result instead of calling it twice
+ predictions = pipe(sequence, fiction_categories)
+ max_index = np.argmax(predictions["scores"])
+ max_label = predictions["labels"][max_index]
+ max_label
+
+ # %%
+ def generate_predictions(sequence, categories):
+     """Return the highest-scoring zero-shot label for a description."""
+     predictions = pipe(sequence, categories)
+     max_index = np.argmax(predictions["scores"])
+     max_label = predictions["labels"][max_index]
+     return max_label
+
+ # %%
+ from tqdm import tqdm
+
+ actual_cats = []
+ predicted_cats = []
+
+ # Evaluate on 300 known Fiction descriptions...
+ for i in tqdm(range(0, 300)):
+     sequence = books.loc[books["simple_categories"] == "Fiction", "description"].reset_index(drop=True)[i]
+     predicted_cats += [generate_predictions(sequence, fiction_categories)]
+     actual_cats += ["Fiction"]
+
+ # %%
+ # ...and on 300 known Nonfiction descriptions
+ for i in tqdm(range(0, 300)):
+     sequence = books.loc[books["simple_categories"] == "Nonfiction", "description"].reset_index(drop=True)[i]
+     predicted_cats += [generate_predictions(sequence, fiction_categories)]
+     actual_cats += ["Nonfiction"]
+
+ # %%
+ predictions_df = pd.DataFrame({"actual_categories": actual_cats, "predicted_categories": predicted_cats})
+ predictions_df
+
+ # %%
+ predictions_df["correct_prediction"] = (
+     np.where(predictions_df["actual_categories"] == predictions_df["predicted_categories"], 1, 0)
+ )
+
+ # %%
+ # Overall accuracy
+ predictions_df["correct_prediction"].sum() / len(predictions_df)
+
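+ # %%
+ # Follow-up sketch: per-class accuracy via pandas (no extra dependencies), to see whether
+ # Fiction or Nonfiction is harder for the zero-shot classifier.
+ predictions_df.groupby("actual_categories")["correct_prediction"].mean()
+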
+ # %%
+ # Predict categories for the books whose simple_categories is still missing
+ isbns = []
+ predicted_cats = []
+
+ missing_cats = books.loc[books["simple_categories"].isna(), ["isbn13", "description"]].reset_index(drop=True)
+
+ # %%
+ for i in tqdm(range(0, len(missing_cats))):
+     sequence = missing_cats["description"][i]
+     predicted_cats += [generate_predictions(sequence, fiction_categories)]
+     isbns += [missing_cats["isbn13"][i]]
+
+ # %%
+ missing_predicted_df = pd.DataFrame({"isbn13": isbns, "predicted_categories": predicted_cats})
+ missing_predicted_df
+
+ # %%
+ # Fill the gaps with the zero-shot predictions
+ books = pd.merge(books, missing_predicted_df, on="isbn13", how="left")
+ books["simple_categories"] = np.where(books["simple_categories"].isna(), books["predicted_categories"], books["simple_categories"])
+ books = books.drop(columns=["predicted_categories"])
+
+ # %%
+ books
+
+ # %%
+ # Check whether any popular genre labels survive in the raw categories column
+ books[books["categories"].str.lower().isin([
+     "romance",
+     "science fiction",
+     "scifi",
+     "fantasy",
+     "horror",
+     "mystery",
+     "thriller",
+     "comedy",
+     "crime",
+     "historical"
+ ])]
+
+ # %%
+ books.to_csv("books_with_categories.csv", index=False)
src/vector_search.py ADDED
@@ -0,0 +1,93 @@
+ # %%
+ from langchain_community.document_loaders import TextLoader
+ from langchain_text_splitters import CharacterTextSplitter
+ from langchain_openai import OpenAIEmbeddings
+ from langchain_chroma import Chroma
+
+ # %%
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ # %% [markdown]
+ # `load_dotenv()` returns True when the OpenAI and Hugging Face keys stored in the .env file are
+ # found; it writes them into the environment.
+
+ # %%
+ import pandas as pd
+
+ books = pd.read_csv("books_cleaned.csv")
+
+ # %%
+ books.head(5)
+
+ # %%
+ books["tagged_description"]
+
+ # %% [markdown]
+ # # We created the tagged description so that our vector search has a unique identifier (the ISBN) for each document.
+
+ # %%
+ # LangChain loaders do not work directly with pandas DataFrames, so save just the tagged
+ # descriptions to a text file, one per line.
+ with open("tagged_description.txt", "w") as f:
+     f.write(books["tagged_description"].str.cat(sep='\n'))
+
+ # %% [markdown]
+ # We did not use plain string matching because it is inefficient and slow compared to vector search.
+
+ # %% [markdown]
+ # # TODO(manshu): explain the code below
+
+ # %%
+ raw_documents = TextLoader("tagged_description.txt").load()
+ text_splitter = CharacterTextSplitter(chunk_size=1, chunk_overlap=0, separator="\n")
+ documents = text_splitter.split_documents(raw_documents)
+
+ # %% [markdown]
+ # # The reason we set such a tiny chunk_size is that the splitter first looks for the separator
+ # closest to the chunk boundary; with a larger chunk_size there is a chance it would not split on a
+ # newline and would split by size instead. A tiny chunk_size forces it to prioritize splitting on
+ # the separator, giving one document per line. chunk_size=0 did not work, but chunk_size=1 works fine.
+
+ # %%
+ documents[0]
+
+ # %%
+ # Build the Chroma vector store from the documents using OpenAI embeddings
+ db_books = Chroma.from_documents(
+     documents,
+     embedding=OpenAIEmbeddings())
+
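+ # %%
+ # Optional sketch: langchain_chroma's Chroma also accepts a persist_directory, so the index can
+ # be reused across runs instead of re-embedding every time (the path is an illustrative assumption).
+ # db_books = Chroma.from_documents(
+ #     documents,
+ #     embedding=OpenAIEmbeddings(),
+ #     persist_directory="chroma_books_db")
+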
+ # %%
+ query = "A book to teach children about nature"
+ docs = db_books.similarity_search(query, k=10)
+ docs
+
+ # %%
+ # Look up the book behind the top hit: the first token of each document is its ISBN
+ books[books["isbn13"] == int(docs[0].page_content.split()[0].strip())]
+
+ # %%
+ def retrieve_semantic_recommendations(
+         query: str,
+         top_k: int = 10,
+ ) -> pd.DataFrame:
+     recs = db_books.similarity_search(query, k=50)
+
+     books_list = []
+     for i in range(0, len(recs)):
+         books_list += [int(recs[i].page_content.strip('"').split()[0])]
+
+     # Note: isin() does not preserve similarity order; cap the result at top_k rows
+     return books[books["isbn13"].isin(books_list)].head(top_k)
+
+ retrieve_semantic_recommendations("A book to teach children about nature")