Compare commits
5 Commits: fix-flashe...use-dateti

SHA1
---
ab76226b0c
a4ebef6e6f
bad50efa9b
629fc063db
3b0baa21de
@@ -21,6 +21,8 @@ import dateutil.parser
 from whoosh.qparser import MultifieldParser, QueryParser
 from whoosh.analysis import StemmingAnalyzer
+from whoosh.qparser.dateparse import DateParserPlugin
+from whoosh import fields, index

 """
@@ -180,30 +182,38 @@ class Search:
 # is defined.

 schema = Schema(
-    id = ID(stored=True, unique=True),
-    kind = ID(stored=True),
+    id = fields.ID(stored=True, unique=True),
+    kind = fields.ID(stored=True),

-    created_time = ID(stored=True),
-    modified_time = ID(stored=True),
-    indexed_time = ID(stored=True),
+    created_time = fields.DATETIME(stored=True),
+    modified_time = fields.DATETIME(stored=True),
+    indexed_time = fields.DATETIME(stored=True),

-    title = TEXT(stored=True, field_boost=100.0),
-    url = ID(stored=True, unique=True),
-    mimetype=ID(stored=True),
-    owner_email=ID(stored=True),
-    owner_name=TEXT(stored=True),
-    repo_name=TEXT(stored=True),
-    repo_url=ID(stored=True),
+    title = fields.TEXT(stored=True, field_boost=100.0),
-    github_user=TEXT(stored=True),
+    url = fields.ID(stored=True),
+    mimetype = fields.TEXT(stored=True),
+    owner_email = fields.ID(stored=True),
+    owner_name = fields.TEXT(stored=True),

+    # mainly for email threads, groups.io, hypothesis
+    group = fields.ID(stored=True),

+    repo_name = fields.TEXT(stored=True),
+    repo_url = fields.ID(stored=True),
+    github_user = fields.TEXT(stored=True),

+    tags = fields.KEYWORD(commas=True,
+                          stored=True,
+                          lowercase=True),

     # comments only
-    issue_title=TEXT(stored=True, field_boost=100.0),
-    issue_url=ID(stored=True),
+    issue_title = fields.TEXT(stored=True, field_boost=100.0),
+    issue_url = fields.ID(stored=True),

-    content=TEXT(stored=True, analyzer=stemming_analyzer)
+    content = fields.TEXT(stored=True, analyzer=stemming_analyzer)
 )
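The switch from plain `ID` columns to `fields.DATETIME` means the index stores real `datetime` objects rather than ISO strings. A minimal, self-contained sketch of that pattern (not code from this branch; the field names and throwaway index directory are illustrative only):

```python
import tempfile
from datetime import datetime
from whoosh import fields, index

# Stand-in schema: one DATETIME field alongside an ID and a TEXT field
schema = fields.Schema(
    id=fields.ID(stored=True, unique=True),
    created_time=fields.DATETIME(stored=True),
    content=fields.TEXT(stored=True),
)

ix_dir = tempfile.mkdtemp()            # throwaway directory for the example
ix = index.create_in(ix_dir, schema)

writer = ix.writer()
writer.add_document(
    id=u"doc-1",
    created_time=datetime(2018, 5, 1, 12, 0),   # a datetime object, not a string
    content=u"example document body",
)
writer.commit()
```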
@@ -243,24 +253,32 @@ class Search:
     writer.delete_by_term('id',item['id'])

     # Index a plain google drive file
-    writer.add_document(
-            id = item['id'],
-            kind = 'gdoc',
-            created_time = item['createdTime'],
-            modified_time = item['modifiedTime'],
-            indexed_time = datetime.now().replace(microsecond=0).isoformat(),
-            title = item['name'],
-            url = item['webViewLink'],
-            mimetype = mimetype,
-            owner_email = item['owners'][0]['emailAddress'],
-            owner_name = item['owners'][0]['displayName'],
-            repo_name='',
-            repo_url='',
-            github_user='',
-            issue_title='',
-            issue_url='',
-            content = content
-    )
+    created_time = dateutil.parser.parse(item['createdTime'])
+    modified_time = dateutil.parser.parse(item['modifiedTime'])
+    indexed_time = datetime.now().replace(microsecond=0)
+    try:
+        writer.add_document(
+                id = item['id'],
+                kind = 'gdoc',
+                created_time = created_time,
+                modified_time = modified_time,
+                indexed_time = indexed_time,
+                title = item['name'],
+                url = item['webViewLink'],
+                mimetype = mimetype,
+                owner_email = item['owners'][0]['emailAddress'],
+                owner_name = item['owners'][0]['displayName'],
+                group='',
+                repo_name='',
+                repo_url='',
+                github_user='',
+                issue_title='',
+                issue_url='',
+                content = content
+        )
+    except ValueError as e:
+        print(repr(e))
+        print(" > XXXXXX Failed to index Google Drive file \"%s\""%(item['name']))

 else:
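The new code parses the Drive API's RFC 3339 timestamp strings into `datetime` objects before handing them to the writer. A small sketch of that conversion, with a made-up timestamp value:

```python
import dateutil.parser
from datetime import datetime

# Drive-style RFC 3339 string (made-up value) -> timezone-aware datetime
created_time = dateutil.parser.parse("2018-04-09T17:45:12.342Z")

# Local wall-clock time for the indexing run, trimmed to seconds precision
indexed_time = datetime.now().replace(microsecond=0)

print(created_time.year, created_time.tzinfo)
print(indexed_time.isoformat())
```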
@@ -314,7 +332,7 @@ class Search:
     )
     assert output == ""
 except RuntimeError:
-    print(" > XXXXXX Failed to index document \"%s\""%(item['name']))
+    print(" > XXXXXX Failed to index Google Drive document \"%s\""%(item['name']))

 # If export was successful, read contents of markdown
@@ -342,24 +360,33 @@ class Search:
 else:
     print(" > Creating a new record")

-    writer.add_document(
-            id = item['id'],
-            kind = 'gdoc',
-            created_time = item['createdTime'],
-            modified_time = item['modifiedTime'],
-            indexed_time = datetime.now().replace(microsecond=0).isoformat(),
-            title = item['name'],
-            url = item['webViewLink'],
-            mimetype = mimetype,
-            owner_email = item['owners'][0]['emailAddress'],
-            owner_name = item['owners'][0]['displayName'],
-            repo_name='',
-            repo_url='',
-            github_user='',
-            issue_title='',
-            issue_url='',
-            content = content
-    )
+    try:
+        created_time = dateutil.parser.parse(item['createdTime'])
+        modified_time = dateutil.parser.parse(item['modifiedTime'])
+        indexed_time = datetime.now()
+        writer.add_document(
+                id = item['id'],
+                kind = 'gdoc',
+                created_time = created_time,
+                modified_time = modified_time,
+                indexed_time = indexed_time,
+                title = item['name'],
+                url = item['webViewLink'],
+                mimetype = mimetype,
+                owner_email = item['owners'][0]['emailAddress'],
+                owner_name = item['owners'][0]['displayName'],
+                group='',
+                repo_name='',
+                repo_url='',
+                github_user='',
+                issue_title='',
+                issue_url='',
+                content = content
+        )
+    except ValueError as e:
+        print(repr(e))
+        print(" > XXXXXX Failed to index Google Drive file \"%s\""%(item['name']))
@@ -393,31 +420,36 @@ class Search:
     issue_comment_content += comment.body.rstrip()
     issue_comment_content += "\n"

-# Now create the actual search index record
-created_time = clean_timestamp(issue.created_at)
-modified_time = clean_timestamp(issue.updated_at)
-indexed_time = clean_timestamp(datetime.now())
+# Now create the actual search index record.
+# Add one document per issue thread,
+# containing entire text of thread.
-writer.add_document(
-        id = issue.html_url,
-        kind = 'issue',
-        created_time = created_time,
-        modified_time = modified_time,
-        indexed_time = indexed_time,
-        title = issue.title,
-        url = issue.html_url,
-        mimetype='',
-        owner_email='',
-        owner_name='',
-        repo_name = repo_name,
-        repo_url = repo_url,
-        github_user = issue.user.login,
-        issue_title = issue.title,
-        issue_url = issue.html_url,
-        content = issue_comment_content
-)
+created_time = issue.created_at
+modified_time = issue.updated_at
+indexed_time = datetime.now()
+try:
+    writer.add_document(
+            id = issue.html_url,
+            kind = 'issue',
+            created_time = created_time,
+            modified_time = modified_time,
+            indexed_time = indexed_time,
+            title = issue.title,
+            url = issue.html_url,
+            mimetype='',
+            owner_email='',
+            owner_name='',
+            group='',
+            repo_name = repo_name,
+            repo_url = repo_url,
+            github_user = issue.user.login,
+            issue_title = issue.title,
+            issue_url = issue.html_url,
+            content = issue_comment_content
+    )
+except ValueError as e:
+    print(repr(e))
+    print(" > XXXXXX Failed to index Github issue \"%s\""%(issue.title))
@@ -447,7 +479,8 @@ class Search:
     print(" > XXXXXXXX Failed to find file info.")
     return

-indexed_time = clean_timestamp(datetime.now())
+indexed_time = datetime.now()

 if fext in MARKDOWN_EXTS:
     print("Indexing markdown doc %s from repo %s"%(fname,repo_name))
@@ -476,24 +509,31 @@ class Search:
     usable_url = "https://github.com/%s/blob/master/%s"%(repo_name, fpath)

     # Now create the actual search index record
-    writer.add_document(
-            id = fsha,
-            kind = 'markdown',
-            created_time = '',
-            modified_time = '',
-            indexed_time = indexed_time,
-            title = fname,
-            url = usable_url,
-            mimetype='',
-            owner_email='',
-            owner_name='',
-            repo_name = repo_name,
-            repo_url = repo_url,
-            github_user = '',
-            issue_title = '',
-            issue_url = '',
-            content = content
-    )
+    try:
+        writer.add_document(
+                id = fsha,
+                kind = 'markdown',
+                created_time = None,
+                modified_time = None,
+                indexed_time = indexed_time,
+                title = fname,
+                url = usable_url,
+                mimetype='',
+                owner_email='',
+                owner_name='',
+                group='',
+                repo_name = repo_name,
+                repo_url = repo_url,
+                github_user = '',
+                issue_title = '',
+                issue_url = '',
+                content = content
+        )
+    except ValueError as e:
+        print(repr(e))
+        print(" > XXXXXX Failed to index Github markdown file \"%s\""%(fname))

 else:
     print("Indexing github file %s from repo %s"%(fname,repo_name))
@@ -501,24 +541,29 @@ class Search:
     key = fname+"_"+fsha

     # Now create the actual search index record
-    writer.add_document(
-            id = key,
-            kind = 'ghfile',
-            created_time = '',
-            modified_time = '',
-            indexed_time = indexed_time,
-            title = fname,
-            url = repo_url,
-            mimetype='',
-            owner_email='',
-            owner_name='',
-            repo_name = repo_name,
-            repo_url = repo_url,
-            github_user = '',
-            issue_title = '',
-            issue_url = '',
-            content = ''
-    )
+    try:
+        writer.add_document(
+                id = key,
+                kind = 'ghfile',
+                created_time = None,
+                modified_time = None,
+                indexed_time = indexed_time,
+                title = fname,
+                url = repo_url,
+                mimetype='',
+                owner_email='',
+                owner_name='',
+                group='',
+                repo_name = repo_name,
+                repo_url = repo_url,
+                github_user = '',
+                issue_title = '',
+                issue_url = '',
+                content = ''
+        )
+    except ValueError as e:
+        print(repr(e))
+        print(" > XXXXXX Failed to index Github file \"%s\""%(fname))
@@ -532,28 +577,42 @@ class Search:
     Use a Github file API record to add a filename
     to the search index.
     """
-    indexed_time = clean_timestamp(datetime.now())
+    if 'created_time' in d.keys() and d['created_time'] is not None:
+        created_time = d['created_time']
+    else:
+        created_time = None
+
+    if 'modified_time' in d.keys() and d['modified_time'] is not None:
+        modified_time = d['modified_time']
+    else:
+        modified_time = None
+
+    indexed_time = datetime.now()

     # Now create the actual search index record
-    writer.add_document(
-            id = d['permalink'],
-            kind = 'emailthread',
-            created_time = '',
-            modified_time = '',
-            indexed_time = indexed_time,
-            title = d['subject'],
-            url = d['permalink'],
-            mimetype='',
-            owner_email='',
-            owner_name=d['original_sender'],
-            repo_name = '',
-            repo_url = '',
-            github_user = '',
-            issue_title = '',
-            issue_url = '',
-            content = d['content']
-    )
+    try:
+        writer.add_document(
+                id = d['permalink'],
+                kind = 'emailthread',
+                created_time = created_time,
+                modified_time = modified_time,
+                indexed_time = indexed_time,
+                title = d['subject'],
+                url = d['permalink'],
+                mimetype='',
+                owner_email='',
+                owner_name=d['original_sender'],
+                group=d['subgroup'],
+                repo_name = '',
+                repo_url = '',
+                github_user = '',
+                issue_title = '',
+                issue_url = '',
+                content = d['content']
+        )
+    except ValueError as e:
+        print(repr(e))
+        print(" > XXXXXX Failed to index Groups.io thread \"%s\""%(d['subject']))
@@ -631,10 +690,10 @@ class Search:
     full_items[f['id']] = f

-    ## Shorter:
-    #break
-    # Longer:
-    if nextPageToken is None:
-        break
+    break
+    ## Longer:
+    #if nextPageToken is None:
+    #    break

 writer = self.ix.writer()
@@ -642,34 +701,41 @@ class Search:
 temp_dir = tempfile.mkdtemp(dir=os.getcwd())
 print("Temporary directory: %s"%(temp_dir))

-# Drop any id in indexed_ids
-# not in remote_ids
-drop_ids = indexed_ids - remote_ids
-for drop_id in drop_ids:
-    writer.delete_by_term('id',drop_id)
-
-# Update any id in indexed_ids
-# and in remote_ids
-update_ids = indexed_ids & remote_ids
-for update_id in update_ids:
-    # cop out
-    writer.delete_by_term('id',update_id)
-    item = full_items[update_id]
-    self.add_drive_file(writer, item, temp_dir, config, update=True)
-    count += 1
-
-# Add any id not in indexed_ids
-# and in remote_ids
-add_ids = remote_ids - indexed_ids
-for add_id in add_ids:
-    item = full_items[add_id]
-    self.add_drive_file(writer, item, temp_dir, config, update=False)
-    count += 1
+try:
+    # Drop any id in indexed_ids
+    # not in remote_ids
+    drop_ids = indexed_ids - remote_ids
+    for drop_id in drop_ids:
+        writer.delete_by_term('id',drop_id)
+
+    # Update any id in indexed_ids
+    # and in remote_ids
+    update_ids = indexed_ids & remote_ids
+    for update_id in update_ids:
+        # cop out
+        writer.delete_by_term('id',update_id)
+        item = full_items[update_id]
+        self.add_drive_file(writer, item, temp_dir, config, update=True)
+        count += 1
+
+    # Add any id not in indexed_ids
+    # and in remote_ids
+    add_ids = remote_ids - indexed_ids
+    for add_id in add_ids:
+        item = full_items[add_id]
+        self.add_drive_file(writer, item, temp_dir, config, update=False)
+        count += 1
+
+except Exception as e:
+    print("ERROR: While adding Google Drive files to search index")
+    print("-"*40)
+    print(repr(e))
+    print("-"*40)
+    print("Continuing...")
+    pass

 print("Cleaning temporary directory: %s"%(temp_dir))
 subprocess.call(['rm','-fr',temp_dir])
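The drop/update/add bookkeeping above is plain set arithmetic over document ids. A tiny sketch with made-up ids:

```python
indexed_ids = {"a", "b", "c"}          # ids already in the search index
remote_ids = {"b", "c", "d"}           # ids currently present in Google Drive

drop_ids = indexed_ids - remote_ids    # stale entries to delete: {'a'}
update_ids = indexed_ids & remote_ids  # entries to re-index: {'b', 'c'}
add_ids = remote_ids - indexed_ids     # new entries to add: {'d'}

print(drop_ids, update_ids, add_ids)
```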
@@ -1074,7 +1140,7 @@ class Search:
 elif doctype=='issue':
     item_keys = ['title','repo_name','repo_url','url','created_time','modified_time']
 elif doctype=='emailthread':
-    item_keys = ['title','owner_name','url']
+    item_keys = ['title','owner_name','url','created_time','modified_time']
 elif doctype=='ghfile':
     item_keys = ['title','repo_name','repo_url','url']
 elif doctype=='markdown':
@@ -1091,11 +1157,7 @@ class Search:
 for r in results:
     d = {}
     for k in item_keys:
-        if k=='created_time' or k=='modified_time':
-            #d[k] = r[k]
-            d[k] = dateutil.parser.parse(r[k]).strftime("%Y-%m-%d")
-        else:
-            d[k] = r[k]
+        d[k] = r[k]
     json_results.append(d)

 return json_results
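The special case can go away because a stored `DATETIME` field now comes back from Whoosh as a `datetime` object, which callers can format directly. A hypothetical hit, not project code:

```python
from datetime import datetime

# Stand-in for a stored Whoosh hit whose created_time is a DATETIME field
hit = {"title": "meeting notes", "created_time": datetime(2018, 5, 1, 9, 30)}
print(hit["created_time"].strftime("%Y-%m-%d"))   # 2018-05-01
```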
@@ -1108,7 +1170,9 @@ class Search:
 query_string = " ".join(query_list)
 query = None
 if ":" in query_string:
-    query = QueryParser("content", self.schema).parse(query_string)
+    query = QueryParser("content", self.schema)
+    query.add_plugin(DateParserPlugin(free=True))
+    query = query.parse(query_string)
 elif len(fields) == 1 and fields[0] == "filename":
     pass
 elif len(fields) == 2:
@@ -1116,9 +1180,12 @@ class Search:
 else:
     # If the user does not specify a field,
     # these are the fields that are actually searched
-    fields = ['title', 'content','owner_name','owner_email','url']
+    fields = ['title', 'content','owner_name','owner_email','url','created_date','modified_date']
 if not query:
-    query = MultifieldParser(fields, schema=self.ix.schema).parse(query_string)
+    query = MultifieldParser(fields, schema=self.ix.schema)
+    query.add_plugin(DateParserPlugin(free=True))
+    query = query.parse(query_string)
+    #query = MultifieldParser(fields, schema=self.ix.schema).parse(query_string)
 parsed_query = "%s" % query
 print("query: %s" % parsed_query)
 results = searcher.search(query, terms=False, scored=True, groupedby="kind")
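Both parser branches now attach `DateParserPlugin(free=True)`, which turns free-form date terms in the query string into range queries against `DATETIME` fields. A self-contained sketch with a stand-in schema, not this project's index:

```python
import tempfile
from datetime import datetime, timedelta
from whoosh import fields, index
from whoosh.qparser import MultifieldParser
from whoosh.qparser.dateparse import DateParserPlugin

# Stand-in schema with one searchable text field and one DATETIME field
schema = fields.Schema(title=fields.TEXT(stored=True),
                       modified_time=fields.DATETIME(stored=True))
ix = index.create_in(tempfile.mkdtemp(), schema)
with ix.writer() as writer:
    writer.add_document(title=u"flask notes",
                        modified_time=datetime.now() - timedelta(days=1))

parser = MultifieldParser(["title"], schema=ix.schema)
parser.add_plugin(DateParserPlugin(free=True))

# "modified_time:yesterday" parses into a date-range query, not a plain term
query = parser.parse(u"flask modified_time:yesterday")
with ix.searcher() as searcher:
    print([hit["title"] for hit in searcher.search(query)])
```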
@@ -1,5 +1,7 @@
|
||||
import requests, os, re
|
||||
from bs4 import BeautifulSoup
|
||||
import dateutil.parser
|
||||
import datetime
|
||||
|
||||
class GroupsIOException(Exception):
|
||||
pass
|
||||
@@ -64,7 +66,7 @@ class GroupsIOArchivesCrawler(object):

     ## Short circuit
     ## for debugging purposes
-    #break
+    break

 return subgroups
@@ -251,7 +253,7 @@ class GroupsIOArchivesCrawler(object):
 subject = soup.find('title').text

 # Extract information for the schema:
-# - permalink for thread (done)
+# - permalink for thread (done above)
 # - subject/title (done)
 # - original sender email/name (done)
 # - content (done)
@@ -266,11 +268,35 @@ class GroupsIOArchivesCrawler(object):
     pass
 else:
     # found an email!
-    # this is a maze, thanks groups.io
+    # this is a maze, not amazing.
+    # thanks groups.io!
     td = tr.find('td')
-    divrow = td.find('div',{'class':'row'}).find('div',{'class':'pull-left'})
+
+    sender_divrow = td.find('div',{'class':'row'})
+    sender_divrow = sender_divrow.find('div',{'class':'pull-left'})
     if (i+1)==1:
-        original_sender = divrow.text.strip()
+        original_sender = sender_divrow.text.strip()
+
+    date_divrow = td.find('div',{'class':'row'})
+    date_divrow = date_divrow.find('div',{'class':'pull-right'})
+    date_divrow = date_divrow.find('font',{'class':'text-muted'})
+    date_divrow = date_divrow.find('script').text
+    try:
+        time_seconds = re.search(' [0-9]{1,} ',date_divrow).group(0)
+        time_seconds = time_seconds.strip()
+        # Thanks groups.io for the weird date formatting
+        time_seconds = time_seconds[:10]
+        mmicro_seconds = time_seconds[10:]
+        if (i+1)==1:
+            created_time = datetime.datetime.utcfromtimestamp(int(time_seconds))
+            modified_time = datetime.datetime.utcfromtimestamp(int(time_seconds))
+        else:
+            modified_time = datetime.datetime.utcfromtimestamp(int(time_seconds))
+
+    except AttributeError:
+        created_time = None
+        modified_time = None

     for div in td.find_all('div'):
         if div.has_attr('id'):
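The value pulled out of the groups.io page appears to be an epoch timestamp in milliseconds embedded in a `<script>` tag, which is why the code keeps only the first ten digits before calling `utcfromtimestamp`. A sketch of that step with a made-up script string:

```python
import re
import datetime

script_text = "var ts = 1525795200000 ;"        # made-up embedded epoch (milliseconds)
match = re.search(' [0-9]{1,} ', script_text)
if match:
    token = match.group(0).strip()
    seconds = int(token[:10])                   # keep the seconds part, drop millisecond digits
    created_time = datetime.datetime.utcfromtimestamp(seconds)
    print(created_time.isoformat())             # 2018-05-08T16:00:00
```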
@@ -299,7 +325,10 @@ class GroupsIOArchivesCrawler(object):

 thread = {
     'permalink' : permalink,
+    'created_time' : created_time,
+    'modified_time' : modified_time,
     'subject' : subject,
+    'subgroup' : subgroup_name,
     'original_sender' : original_sender,
     'content' : full_content
 }
@@ -324,11 +353,13 @@ class GroupsIOArchivesCrawler(object):

 results = []
 for row in rows:
-    # We don't care about anything except title and ugly link
+    # This is where we extract
+    # a list of thread titles
+    # and corresponding links.
     subject = row.find('span',{'class':'subject'})
     title = subject.get_text()
     link = row.find('a')['href']
     #print(title)

     results.append((title,link))

 return results
@@ -5,7 +5,7 @@
 <div class="alert alert-success alert-dismissible fade in">
   <a href="#" class="close" data-dismiss="alert" aria-label="close">×</a>
   {% for message in messages %}
-  <p>{{ message }}</p>
+  <p class="lead">{{ message }}</p>
   {% endfor %}
 </div>
 </div>