in SupportScripts/Python/DownloadGroupFeed/download.py [0:0]
def getFeed(group, name):
    """Fetch the feed of a Facebook group via the Graph API.

    Args:
        group: Graph API id of the group whose feed to fetch.
        name: Human-readable group name. Currently unused; kept so the
            call signature stays compatible with existing callers.

    Returns:
        A list of feed-item dicts. Every returned item is guaranteed to
        carry "message" and "link" keys (empty string when the API
        omitted them). Returns an empty list when the API reports an
        error.
    """
    # Token-based auth header
    headers = {
        "Authorization": "Bearer " + TOKEN,
        "User-Agent": "GithubRep-DownloadGroupFeed",
    }
    # Get the relevant group post content for each feed item.
    # Like and comment summaries are requested only for their total
    # counts, so their paging limit is 0 (no individual entries fetched).
    params = ("?fields=permalink_url,from,story,type,message,link,"
              "created_time,updated_time,"
              "likes.limit(0).summary(total_count),"
              "comments.limit(0).summary(total_count)")
    # Default paging limit
    params += "&limit=" + DEFAULT_LIMIT
    # Time-based lower bound as a Unix timestamp.
    # NOTE(review): strftime("%s") is a non-portable glibc extension
    # (undefined on Windows); calendar.timegm(SINCE.timetuple()) would
    # be a portable replacement — confirm before changing, callers may
    # rely on local-time semantics.
    params += "&since=" + SINCE.strftime("%s")
    graph_url = GRAPH_URL_PREFIX + group + "/feed" + params
    result = requests.get(graph_url, headers=headers)
    # Decode via requests itself. The original json.loads(result.text,
    # result.encoding) passed encoding positionally, which only worked
    # on Python 2 (the json.loads "encoding" parameter was removed in
    # Python 3.9).
    result_json = result.json()
    feed = []
    # Got an error? Report it and bail out with an empty feed.
    if "error" in result_json:
        # print() call form works on both Python 2 and 3; the original
        # "print a, b" statement was Python 2-only syntax.
        print("Error " + result_json["error"]["message"])
        return []
    # Did we get back data?
    if "data" in result_json:
        for feed_item in result_json["data"]:
            # Convenience: guarantee message / link keys always exist.
            feed_item.setdefault("message", "")
            feed_item.setdefault("link", "")
            feed.append(feed_item)
    return feed