Compare commits
101 Commits
| SHA1 |
|---|
| 4580f67920 |
| 2f2da60626 |
| 73c7e5e6c2 |
| e45423aaa4 |
| 9a473edfdc |
| 1c34d2876f |
| 6af38c686f |
| 5ef60d340e |
| 29b5959623 |
| 4f116bc8bf |
| 6ebe7d03fc |
| 8e8592bb29 |
| f9c70a8ee4 |
| 0f35ae691e |
| 9193e6b3e6 |
| 63895953c0 |
| ac6d85fa99 |
| 44c0c189d7 |
| b75ede5d89 |
| 84deaa2f64 |
| 26527e83c3 |
| 2642423289 |
| 53a7b07c06 |
| 20afb205e3 |
| bd53a83058 |
| 880cb6ba7e |
| 0529172af2 |
| 3a14e32e58 |
| 07da41f1f5 |
| 87433cc97f |
| 05a51cd5e6 |
| bcec24fbf7 |
| 08fb19c6b9 |
| e8871f823e |
| 558b9d4eba |
| 5e6dc9ffe2 |
| f5c3077cb4 |
| cbca64aaf3 |
| 6d502af029 |
| 685d4b2ccd |
| e9eae631b4 |
| 0486bf1384 |
| 93230c4285 |
| dfa2f89f29 |
| d6dda44ae8 |
| 46fe3390b0 |
| 5f812a6b85 |
| ab97f8e390 |
| 7c1585120e |
| 51f80e0867 |
| 4d3925b193 |
| e599a851cf |
| 9373e66e29 |
| 1a82cac16f |
| 936558779b |
| 3032dea8a0 |
| a2d747e41e |
| 7c168f3532 |
| 92d69cf7cb |
| 5a229a4b2b |
| aa2e5a41bb |
| 9529cf2970 |
| b854b38349 |
| 54e9f0e8fc |
| b45561ceca |
| bd17ac4906 |
| 6e64e76310 |
| 3c0f1b18b4 |
| 33ba27e2d0 |
| 96258ae19b |
| b729bb61c4 |
| 9ce65f52f1 |
| f3bc194488 |
| 9bcd35a48d |
| 496a733cb2 |
| 7d5a3ad2c5 |
| f1021fd6a0 |
| d5f9a0b1b4 |
| 077f850808 |
| c13a500420 |
| e5d2ca532c |
| efc309c962 |
| a6d9a75030 |
| 7af5109e7f |
| c9c2d75377 |
| 199fab7875 |
| 36baf1aaec |
| 81fa6ca4d6 |
| 1f5dea6aff |
| 16cc68dfed |
| da9569fd4c |
| aa95a9b16b |
| 39930d1233 |
| f2fad859dc |
| ef7fceacea |
| 5f7366e556 |
| 7f51f139f2 |
| ce99e5301b |
| 8778cfdae6 |
| 3588587c92 |
| 65e8948202 |
4  .gitignore (vendored)

```diff
@@ -1,3 +1,7 @@
 __pycache__
 *.html
 .venv
+neuralWeights.json
+neuralWeights.json.*
+.imgLinkCache.json
+.idea
```
README.md

```diff
@@ -1,4 +1,4 @@
 # CaliGraph
 CaliGraph connects to the database of your local Calibre-Instance in order to recommend which unread books are likely to be enjoyed. The recommendations are displayed in a graph which explains the recommendation by showing correlations to previously read books, authors, recommendations by same individuals, tags...
 
-![Screenshot](https://gitea.dominik.roth.ml/dodox/CaliGraph/raw/branch/master/Screenshot_README.png)
+![Screenshot](./Screenshot_README.png)
```
Screenshot_README.png

Binary file not shown. Size before: 428 KiB; after: 243 KiB.
1011  caliGraph.py

File diff suppressed because it is too large.
neuralWeights.json (deleted)

```diff
@@ -1 +0,0 @@
-{"topList": 0.8269026935523768, "recommender": 0.5687397308864482, "author": 0.9602300111040548, "series": 0.0015315822252870478, "tag": 0.0028789924118737056, "mu": 0.48730278196967397, "sigma": 0.02394878388858184, "se": 0.45207554705083647, "bias": 0.555294008129175}
```
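The deleted file stored the recommender's learned weights as a single flat JSON object: one float per feature (topList, recommender, author, series, tag) plus score-normalization terms (mu, sigma, se, bias). A minimal sketch of reading such a file back into a dict, assuming exactly this layout; the helper name below is illustrative and not part of caliGraph.py:

```python
import json

def load_neural_weights(path="neuralWeights.json"):
    # Hypothetical helper: the file is one flat JSON object
    # mapping feature names to float weights.
    with open(path) as f:
        return json.load(f)

weights = load_neural_weights()
print(weights["author"])  # weight of the author-match feature, e.g. 0.96
```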
BIN  perfTests/1645539905.png (new file)

Binary file not shown. Size: 2.5 MiB.
3004  rec_dbs/mrb_db.csv (new file)

File diff suppressed because it is too large.
2691  rec_dbs/tgb_1.csv (new file)

File diff suppressed because it is too large.
1318  rec_dbs/tgb_2.csv (new file)

File diff suppressed because it is too large.
14  requirements.txt (new file)

```diff
@@ -0,0 +1,14 @@
+numpy
+scipy
+matplotlib
+networkx
+pyvis
+fuzzywuzzy
+rake_nltk
+ptpython
+requests
+pandas
+plotly
+wikipedia
+argcomplete
+pyzshcomplete
```
155  zSearch.py (new file)

```python
import requests
from bs4 import BeautifulSoup


class Search:

    def __init__(self):
        self.searchType = "general"
        self.searchOptions = {"e": "", "yearFrom": "",
                              "yearTo": "", "language": "",
                              "extension": "", "page": "1",
                              "order": ""}
        self.searchResults = "books"
        self.input = ""

    def nextPage(self):
        '''Goes to the next result page'''
        self.searchOptions["page"] = str(int(self.searchOptions["page"]) + 1)

    def previousPage(self):
        '''Goes to the previous result page'''
        if self.searchOptions["page"] != "1":
            self.searchOptions["page"] = str(int(self.searchOptions["page"]) - 1)
        else:
            return 1

    def updateSearchOptions(self):
        '''Updates searchOptions appropriately, depending on
        searchType and searchResults
        '''
        if self.searchType == "general":
            self.searchOptions.pop("matchPhrase", None)
        else:
            self.searchOptions["matchPhrase"] = ""

        if self.searchResults == "books":
            self.searchOptions["language"] = ""
            self.searchOptions["extension"] = ""
        else:
            self.searchOptions.pop("language", None)
            self.searchOptions.pop("extension", None)

    def executeSearch(self):
        '''Executes the GET request and returns the response'''
        if self.searchResults == "books":
            url = "http://b-ok.cc"
        else:
            url = "http://booksc.xyz"

        r = requests.get(url + "/s/" + self.input, params=self.searchOptions)
        return r

    def getResults(self):
        '''Fetches search results. Returns a list of Book objects'''
        r = self.executeSearch()
        soup = BeautifulSoup(r.text, "html.parser")
        match = soup.find_all(class_="resItemBox")
        results = []
        for e in match:
            title = e.find(itemprop="name")
            author = e.find(class_="authors")
            year = e.find(class_="bookProperty property_year")
            language = e.find(class_="bookProperty property_language")
            fileInfo = e.find(class_="bookProperty property__file")
            link = e.find("a", href=True)
            link = link["href"]

            if self.searchResults == "books":
                fullLink = "https://b-ok.cc" + link
            else:
                fullLink = "https://booksc.xyz" + link

            title = isNone(title)
            author = isNone(author)
            year = isNone(year)
            language = isNone(language)
            fileInfo = isNone(fileInfo)

            book = Book(title, author, year, language, fileInfo, fullLink)
            results += [book]

        return results

    def reset(self):
        '''Clears the search and resets to default options'''
        self.searchType = "general"
        self.searchOptions = {"e": "", "yearFrom": "",
                              "yearTo": "", "language": "",
                              "extension": "", "page": "1",
                              "order": ""}
        self.searchResults = "books"
        self.input = ""


def isNone(e):
    '''Returns the element's text joined onto one line, or "" if e is None'''
    if e is not None:
        return "".join(e.text.splitlines())
    else:
        return ""


class Book:

    def __init__(self, title, author, year, language, fileInfo, link):
        self.title = title
        self.author = author
        self.year = year
        self.language = language
        self.fType, self.size = fileInfo.split(', ')
        self.link = link
        self.dlLink = None
        self.page = None

    def __repr__(self):
        return '<zBook: ' + str(self) + '>'

    def __str__(self):
        return " / ".join([self.title, self.author, self.year,
                           self.language, self.fType, self.size])

    def getDetails(self):
        '''Returns more specific info about the book. The info is retrieved
        via the link attribute
        '''
        if self.page is None:
            self.page = requests.get(self.link).text

        soup = BeautifulSoup(self.page, "html.parser")
        # for some reason, bookProperty also matches properties from other books;
        # restricting the search to this book's own card prevents this
        soup = soup.find(class_="row cardBooks")
        match = soup.find_all(class_="bookProperty")

        results = ""
        for e in match:
            results += "".join(e.text.splitlines())
            results += "\n"

        # this makes writing the category easier for some books
        results = results.replace("\\\\", " \\ ")
        return results

    def getDownloadURL(self):
        if self.dlLink is None:
            if self.page is None:
                self.page = requests.get(self.link).text
            soup = BeautifulSoup(self.page, "html.parser")
            self.dlLink = soup.find('a', 'btn btn-primary dlButton addDownloadedBook')['href']
        return 'https://b-ok.cc' + self.dlLink + '?dsource=recommend'

    def saveBook(self, path):
        '''Downloads the book and writes it to path'''
        r = requests.get(self.getDownloadURL(), allow_redirects=True)
        with open(path, 'wb') as f:
            f.write(r.content)
```
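A short usage sketch for the new module, assuming the b-ok.cc / booksc.xyz markup still matches the selectors above; the query string and output file name are made up for illustration:

```python
from zSearch import Search

search = Search()
search.input = "foundation asimov"          # hypothetical query
search.searchOptions["extension"] = "epub"  # optional filter
search.updateSearchOptions()

books = search.getResults()
for book in books[:3]:
    print(book)  # "title / author / year / language / fType / size"

if books:
    print(books[0].getDetails())          # full property listing for one result
    books[0].saveBook("foundation.epub")  # fetches getDownloadURL() and writes the file
```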