Created August 17, 2018 01:36
Uses the FogBugz API to download cases with attachments into a SQLite DB; it also downloads wikis.
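To run it, create a fogbugz_downloader_settings.py next to the script (all values below are placeholders) and start it with python fogbugs_downloader.py:

# fogbugz_downloader_settings.py (placeholder values)
S_FOGBUGZ_URL = 'https://example.fogbugz.com/'
S_EMAIL = 'user@example.com'
S_PASSWORD = 'secret'
S_TOKEN = 'your-api-token'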
'''
fogbugs_downloader.py :: uses the FogBugz API to download cases with attachments into a SQLite
DB; it also downloads wikis. Uses pycookiecheat to download attachments
(see https://stackoverflow.com/q/51498991/471160).

tbd: test under linux
tbd: download wiki attached files
'''
import sys
import time
import html
import os
import traceback
# For win32 pycookiecheat tweaks see: https://github.com/luskan/pycookiecheat
from pycookiecheat import chrome_cookies
import requests
from fogbugz import FogBugz
from datetime import datetime
import sqlite3
# fogbugz_downloader_settings file should contain:
# S_FOGBUGZ_URL = ''
# S_EMAIL = ''
# S_PASSWORD = ''
# S_TOKEN = ''
import fogbugz_downloader_settings
userhome = os.path.expanduser('~')
if sys.platform == 'darwin':
    cookies_path = userhome + '/Library/Application Support/Google/Chrome/Default/Cookies'
elif sys.platform.startswith('win32'):
    #cookies_path = userhome + r'\AppData\Local\Google\Chrome\User Data\Default\Cookies'
    #cookies_path = userhome + r'\AppData\Local\Google\Chrome\User Data\Profile 1\Cookies'
    cookies_path = userhome + r'\AppData\Local\Google\Chrome\User Data\Profile 2\Cookies'
else:
    raise OSError("tbd: linux.")
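
# Untested assumption for the "tbd: linux" branch above: on Linux, Chrome's
# default-profile cookies usually live under ~/.config/google-chrome/Default/Cookies,
# so the else branch could plausibly become:
#   cookies_path = userhome + '/.config/google-chrome/Default/Cookies'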
# Case columns requested from the FogBugz search API (passed via cols=).
columns = '''
ixBug,ixBugParent,ixBugChildren,tags,fOpen,sTitle,sOriginalTitle,sLatestTextSummary,
ixBugEventLatestText,ixProject,sProject,
ixArea,sArea,ixGroup,ixPersonAssignedTo,sPersonAssignedTo,sEmailAssignedTo,ixPersonOpenedBy,
ixPersonClosedBy,ixPersonResolvedBy,ixPersonLastEditedBy,
ixStatus,sStatus,ixBugDuplicates,ixBugOriginal,ixPriority,sPriority,ixFixFor,sFixFor,dtFixFor,
sVersion,sComputer,hrsOrigEst,hrsCurrEst,hrsElapsedExtra,hrsElapsed,c,sCustomerEmail,
ixMailbox,ixCategory,sCategory,dtOpened,dtResolved,dtClosed,ixBugEventLatest,dtLastUpdated,
fReplied,fForwarded,sTicket,ixDiscussTopic,dtDue,sReleaseNotes,ixBugEventLastView,
dtLastView,ixRelatedBugs,sScoutDescription,sScoutMessage,fScoutStopReporting,dtLastOccurrence,
fSubscribed,dblStoryPts,nFixForOrder,events,minievents,ixKanbanColumn2
'''
def store_case(db, case):
    """
    Stores a single case and its attachments in the db.

    Args:
        db: sqlite db
        case: case object
    Returns:
        none
    """
    print(case.ixBug.string)

    for att in case.rgAttachments:
        file_name = att.sFileName.string
        url = fogbugz_downloader_settings.S_FOGBUGZ_URL \
              + html.unescape(att.sURL.string)
        print(" --- %s:%s " % (att.sFileName.string, url))

        # Attachment URLs are not served through the API token, so reuse
        # Chrome's session cookies to authenticate the download.
        cookies = chrome_cookies(url, cookie_file=cookies_path)

        # Retry with a linearly growing delay; give up after 10 attempts.
        r = None
        for tries in range(10):
            try:
                r = requests.get(url, cookies=cookies, stream=True)
                break
            except Exception as error:
                print('An exception at try {} occurred: {}, {}'.format(
                    tries, error, traceback.format_exc()))
                time.sleep(tries * 10)

        if r is not None and r.status_code == 200:
            cursor = db.cursor()
            cursor.execute('''INSERT INTO attachments(file_name, ix_bug, ix_bug_event, raw)
                              VALUES(?,?,?,?)''',
                           (file_name, case.ixBug.string, case.ixBugEvent.string,
                            sqlite3.Binary(r.content)))
            db.commit()

    cursor = db.cursor()
    cursor.execute('''INSERT INTO cases(case_id, title, raw)
                      VALUES(?,?,?)''',
                   (case.ixBug.string, case.sTitle.string, str(case)))
    db.commit()
def store_wiki(db, total, wiki, article, full_article):
    print('#%s %s,%s,%s,%s - %s %s'
          % (total,
             wiki.ixWiki.string, wiki.sWiki.string, wiki.sTagLineHTML.string,
             wiki.ixWikiPageRoot.string,
             article.ixWikiPage.string, str(article.sHeadline.string)))

    cursor = db.cursor()
    cursor.execute('''INSERT INTO articles(ix_wiki, ix_wiki_page_root, s_wiki, s_tag_line_html,
                                           ix_wiki_page, s_head_line,
                                           s_body)
                      VALUES(?,?,?,?,?,?,?)''',
                   (
                       wiki.ixWiki.string, wiki.ixWikiPageRoot.string,
                       wiki.sWiki.string, wiki.sTagLineHTML.string,
                       article.ixWikiPage.string, str(article.sHeadline.string),
                       full_article.sBody.string
                   ))
    db.commit()
def main():
    if not os.path.isdir("fb"):
        os.mkdir("fb")

    fb = FogBugz(fogbugz_downloader_settings.S_FOGBUGZ_URL,
                 fogbugz_downloader_settings.S_TOKEN, api_version=8)
    fb.logon(fogbugz_downloader_settings.S_EMAIL, fogbugz_downloader_settings.S_PASSWORD)

    # Start from a fresh database on every run.
    if os.path.isfile('fb/fogbugz.sqlite3'):
        os.remove('fb/fogbugz.sqlite3')
    db = sqlite3.connect('fb/fogbugz.sqlite3')

    cursor = db.cursor()
    cursor.execute('''
        CREATE TABLE cases(id INTEGER PRIMARY KEY AUTOINCREMENT, case_id INTEGER,
                           title TEXT,
                           raw TEXT)
    ''')
    cursor.execute('''
        CREATE TABLE attachments(id INTEGER PRIMARY KEY AUTOINCREMENT,
                                 file_name TEXT,
                                 ix_bug_event INTEGER,
                                 ix_bug INTEGER,
                                 raw BLOB)
    ''')
    cursor.execute('''
        CREATE TABLE articles(id INTEGER PRIMARY KEY AUTOINCREMENT,
                              ix_wiki INTEGER, ix_wiki_page_root INTEGER,
                              s_wiki TEXT, s_tag_line_html TEXT,
                              ix_wiki_page INTEGER, s_head_line TEXT,
                              s_body TEXT)
    ''')
    db.commit()

    # Download cases in batches of case_range ids per search query.
    case_id = 0
    case_range = 20
    last_case_id = 70000  # upper bound of case ids to fetch
    while case_id < last_case_id:
        print('%s: %s / %s' % (datetime.now(), case_id, last_case_id))
        caseIds = ','.join(str(e) for e in range(case_id, case_id + case_range))
        respCases = fb.search(q=caseIds, cols=columns.replace('\n', ''))
        for case in respCases.find_all('case'):
            store_case(db, case)
        case_id += case_range

    # Download every article of every wiki.
    respWikis = fb.listWikis()
    total = 0
    for wiki in respWikis.find_all('wiki'):
        # print(respWikis.prettify())
        respArticles = fb.listArticles(ixWiki=wiki.ixWiki.string)
        for article in respArticles.find_all('article'):
            total += 1
            full_article = fb.viewArticle(ixWikiPage=article.ixWikiPage.string)
            store_wiki(db, total, wiki, article, full_article)

    fb.logoff()
    db.close()

if __name__ == '__main__':
    main()
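
Once a run finishes, everything lives in fb/fogbugz.sqlite3 and can be queried with plain sqlite3. A minimal sketch for writing the attachment blobs back to disk (the script name and the out/ directory are hypothetical; the schema is the one created above):

# dump_attachments.py :: writes every stored attachment back to disk,
# grouped by case id, e.g. out/1234/screenshot.png (hypothetical layout).
import os
import sqlite3

db = sqlite3.connect('fb/fogbugz.sqlite3')
for file_name, ix_bug, raw in db.execute(
        'SELECT file_name, ix_bug, raw FROM attachments'):
    target_dir = os.path.join('out', str(ix_bug))
    os.makedirs(target_dir, exist_ok=True)
    # basename() guards against attachment names containing path separators
    with open(os.path.join(target_dir, os.path.basename(file_name)), 'wb') as f:
        f.write(raw)
db.close()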