aerc/worker/imap/open.go

package imap

import (
	"sort"

	sortthread "github.com/emersion/go-imap-sortthread"

	"git.sr.ht/~rjarry/aerc/logging"
	"git.sr.ht/~rjarry/aerc/worker/types"
)
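
// handleOpenDirectory selects the requested mailbox on the IMAP server and
// keeps the returned mailbox status in imapw.selected, so that later
// operations can read fields such as UIDVALIDITY (used by the header cache).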
func (imapw *IMAPWorker) handleOpenDirectory(msg *types.OpenDirectory) {
logging.Infof("Opening %s", msg.Directory)
	sel, err := imapw.client.Select(msg.Directory, false)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
	} else {
		imapw.selected = sel
		imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
	}
}
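
// handleFetchDirectoryContents resolves the UID list for the selected
// mailbox, using server-side SORT when the extension is available and
// falling back to a plain UID SEARCH otherwise.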
func (imapw *IMAPWorker) handleFetchDirectoryContents(
	msg *types.FetchDirectoryContents) {
	logging.Infof("Fetching UID list")

	searchCriteria, err := parseSearch(msg.FilterCriteria)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
		return
	}

	sortCriteria := translateSortCriterions(msg.SortCriteria)
	var uids []uint32

	// If the server supports the SORT extension, do the sorting server side
	ok, err := imapw.client.sort.SupportSort()
	if err == nil && ok && len(sortCriteria) > 0 {
		uids, err = imapw.client.sort.UidSort(sortCriteria, searchCriteria)
		// reverse in place, as msgList displays backwards
		for i, j := 0, len(uids)-1; i < j; i, j = i+1, j-1 {
			uids[i], uids[j] = uids[j], uids[i]
		}
	} else {
		if err != nil {
			// Non-fatal, but log it to get some debug info
			logging.Errorf("can't check for SORT support: %v", err)
		}
		uids, err = imapw.client.UidSearch(searchCriteria)
	}
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
	} else {
		logging.Infof("Found %d UIDs", len(uids))
		imapw.seqMap.Clear()
		imapw.worker.PostMessage(&types.DirectoryContents{
			Message: types.RespondTo(msg),
			Uids:    uids,
		}, nil)
		imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
	}
}

type sortFieldMapT map[types.SortField]sortthread.SortField

// caution, incomplete mapping
var sortFieldMap sortFieldMapT = sortFieldMapT{
	types.SortArrival: sortthread.SortArrival,
	types.SortCc:      sortthread.SortCc,
	types.SortDate:    sortthread.SortDate,
	types.SortFrom:    sortthread.SortFrom,
	types.SortSize:    sortthread.SortSize,
	types.SortSubject: sortthread.SortSubject,
	types.SortTo:      sortthread.SortTo,
}
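
// translateSortCriterions converts aerc sort criteria into their
// go-imap-sortthread equivalents. Fields with no entry in sortFieldMap
// are silently dropped.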
func translateSortCriterions(
	cs []*types.SortCriterion) []sortthread.SortCriterion {
	result := make([]sortthread.SortCriterion, 0, len(cs))
	for _, c := range cs {
		if f, ok := sortFieldMap[c.Field]; ok {
			result = append(result, sortthread.SortCriterion{Field: f, Reverse: c.Reverse})
		}
	}
	return result
}
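
// handleDirectoryThreaded asks the server for the message list as threads
// (REFERENCES algorithm) and converts the result into aerc's thread
// representation before posting it back to the UI.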
func (imapw *IMAPWorker) handleDirectoryThreaded(
	msg *types.FetchDirectoryThreaded) {
	logging.Infof("Fetching threaded UID list")

	searchCriteria, err := parseSearch(msg.FilterCriteria)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
		return
	}
	threads, err := imapw.client.thread.UidThread(sortthread.References,
		searchCriteria)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
	} else {
		aercThreads, count := convertThreads(threads, nil)
		sort.Sort(types.ByUID(aercThreads))
		logging.Infof("Found %d threaded messages", count)
		imapw.seqMap.Clear()
		imapw.worker.PostMessage(&types.DirectoryThreaded{
			Message: types.RespondTo(msg),
			Threads: aercThreads,
		}, nil)
		imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
	}
}
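
// convertThreads recursively maps go-imap-sortthread threads onto
// types.Thread nodes, wiring up parent, first-child and sibling links.
// It returns the converted top-level slice and the total number of messages.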
func convertThreads(threads []*sortthread.Thread, parent *types.Thread) ([]*types.Thread, int) {
	if threads == nil {
		return nil, 0
	}
	conv := make([]*types.Thread, len(threads))
	count := 0

	for i := 0; i < len(threads); i++ {
		t := threads[i]
		conv[i] = &types.Thread{
			Uid: t.Id,
		}

		// Set the first child node
		children, childCount := convertThreads(t.Children, conv[i])
		if len(children) > 0 {
			conv[i].FirstChild = children[0]
		}

		// Set the parent node
		if parent != nil {
			conv[i].Parent = parent

			// elements of threads are siblings
			if i > 0 {
				conv[i].PrevSibling = conv[i-1]
				conv[i-1].NextSibling = conv[i]
			}
		}

		count += childCount + 1
	}
	return conv, count
}