Get array of posts from the current archive page loop

The classic WordPress loop (for example, in archive.php) looks like this:

if ( have_posts() ) :
    while ( have_posts() ) : the_post();
        get_template_part( 'template-parts/content', get_post_format() );
    endwhile;
endif;

I want to get an array of post objects on an archive page without having to do this:

$my_posts = array();
while ( have_posts() ) {
    the_post();
    $my_posts[] = $post;
}

// $my_posts is array of post objects

Is there a simpler method to get it?

Making a Zsync file archive with checksums

I’m learning Go by doing. I tried to port Java’s ZsyncMake implementation to Go, and I also use Go’s concurrency features (goroutines and channels). I have some experience in Java, but I have never worked with a natively compiled language before. One problem that came up immediately is that int in Go isn’t the same as int32 (its size depends on the platform, while Java’s int is always 4 bytes), so I need to convert between types most of the time.
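For example, a minimal standalone snippet (not part of my port, just illustrating the point) looks like this:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    blockSize := 2048 // int: 32 or 64 bits, depending on the platform

    // Go never converts numeric types implicitly, so fixed-width types
    // always need an explicit conversion, e.g. when serializing:
    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, uint32(blockSize))

    asInt32 := int32(blockSize) // the same applies for int -> int32
    fmt.Println(asInt32, buf)
}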

Here’s my code. In some comments I wrote [ASK] to indicate that I’m not sure whether it’s the proper way to implement something in Go.

package zsync

import (
    "bufio"
    "crypto/sha1"
    "encoding/binary"
    "encoding/hex"
    "goZsyncmake/md4"
    "goZsyncmake/zsyncOptions"
    "hash"
    "io"
    "log"
    "math"
    "os"
    "strconv"
    "time"
)

var ZSYNC_VERSION = "0.6.2"
var BLOCK_SIZE_SMALL = 2048
var BLOCK_SIZE_LARGE = 4096

func ZsyncMake(path string, options zsyncOptions.Options) {
    checksum, headers, zsyncFilePath := writeToFile(path, options)
    zsyncFile, err := os.Create(zsyncFilePath)
    if err != nil {
        log.Fatal(err)
    }
    defer zsyncFile.Close()

    bfio := bufio.NewWriter(zsyncFile)
    _, err = bfio.WriteString(headers)
    if err != nil {
        log.Fatal(err)
    }

    _, err = bfio.Write(checksum)
    if err != nil {
        log.Fatal(err)
    }

    bfio.Flush()
}

func writeToFile(path string, options zsyncOptions.Options) ([]byte, string, string) {
    file, err := os.Open(path)
    if err != nil {
        log.Fatal(err)
    }
    defer file.Close()

    outputFileName := file.Name() + ".zsync"

    fileInfo, err := file.Stat()
    if err != nil {
        log.Fatal(err)
    }

    opts := calculateMissingValues(options, file)

    blockSize := opts.BlockSize
    fileLength := fileInfo.Size()
    sequenceMatches := 0
    if fileLength > int64(options.BlockSize) {
        sequenceMatches = 2
    } else {
        sequenceMatches = 1
    }
    weakChecksumLength := weakChecksumLength(fileLength, blockSize, sequenceMatches)
    strongChecksumLength := strongChecksumLength(fileLength, blockSize, sequenceMatches)

    fileDigest := sha1.New()
    blockDigest := md4.New()

    checksum, fileChecksum := computeChecksum(file, blockSize, fileLength, weakChecksumLength, strongChecksumLength, fileDigest, blockDigest)
    strFileChecksum := hex.EncodeToString(fileChecksum)

    // [ASK] I suspect I can improve performance here rather than appending string with +
    strHeader := "zsync: " + ZSYNC_VERSION + "\n" +
        "Filename: " + fileInfo.Name() + "\n" +
        "MTime: " + fileInfo.ModTime().Format(time.RFC1123Z) + "\n" +
        "Blocksize: " + strconv.Itoa(blockSize) + "\n" +
        "Length: " + strconv.Itoa(int(fileLength)) + "\n" +
        "Hash-Lengths: " + strconv.Itoa(sequenceMatches) + "," + strconv.Itoa(weakChecksumLength) + "," + strconv.Itoa(strongChecksumLength) + "\n" +
        "URL: " + opts.Url + "\n" +
        "SHA-1: " + strFileChecksum + "\n\n"

    return checksum, strHeader, outputFileName
}

func sha1HashFile(path string, fileChecksumChannel chan []byte) {
    file, err := os.Open(path)
    if err != nil {
        log.Fatal(err)
    }
    defer file.Close()

    hasher := sha1.New()
    if _, err := io.Copy(hasher, file); err != nil {
        log.Fatal(err)
    }

    fileChecksumChannel <- hasher.Sum(nil)
}

func computeChecksum(f *os.File, blocksize int, fileLength int64, weakLen int, strongLen int, fileDigest hash.Hash, blockDigest hash.Hash) ([]byte, []byte) {
    checksumBytes := make([]byte, 0)
    block := make([]byte, blocksize)

    fileChecksumChannel := make(chan []byte)
    go sha1HashFile(f.Name(), fileChecksumChannel)

    for {
        read, err := f.Read(block)
        if err != nil {
            if err == io.EOF {
                break
            }
            log.Fatal(err)
        }

        if read < blocksize {
            blockSlice := block[read:blocksize]
            for i := range blockSlice {
                blockSlice[i] = byte(0)
            }
        }

        rsum := computeRsum(block)

        unsignedWeakByte := make([]byte, 4)
        binary.BigEndian.PutUint32(unsignedWeakByte, uint32(rsum))

        tempUnsignedWeakByte := unsignedWeakByte[len(unsignedWeakByte)-weakLen:]
        checksumBytes = append(checksumBytes, tempUnsignedWeakByte...)

        blockDigest.Reset()
        blockDigest.Write(block)
        strongBytes := blockDigest.Sum(nil)

        tempUnsignedStrongByte := strongBytes[:strongLen]
        checksumBytes = append(checksumBytes, tempUnsignedStrongByte...)
    }

    fileChecksum := <-fileChecksumChannel
    checksumBytes = append(checksumBytes, fileChecksum...)

    return checksumBytes, fileChecksum
}

// [ASK] A lot of type casting happens here, not sure if it's a good practice in Go
func strongChecksumLength(fileLength int64, blocksize int, sequenceMatches int) int {
    // estimated number of bytes to allocate for strong checksum
    d := (math.Log(float64(fileLength))+math.Log(float64(1+fileLength/int64(blocksize))))/math.Log(2) + 20

    // reduced number of bits by sequence matches
    lFirst := float64(math.Ceil(d / float64(sequenceMatches) / 8))

    // second checksum - not reduced by sequence matches
    lSecond := float64((math.Log(float64(1+fileLength/int64(blocksize)))/math.Log(2) + 20 + 7.9) / 8)

    // return max of two: return no more than 16 bytes (MD4 max)
    return int(math.Min(float64(16), math.Max(lFirst, lSecond)))
}

// [ASK] A lot of type casting happens here, not sure if it's a good practice in Go
func weakChecksumLength(fileLength int64, blocksize int, sequenceMatches int) int {
    // estimated number of bytes to allocate for the rolling checksum per formula in
    // Weak Checksum section of http://zsync.moria.org.uk/paper/ch02s03.html
    d := (math.Log(float64(fileLength))+math.Log(float64(blocksize)))/math.Log(2) - 8.6

    // reduced number of bits by sequence matches per http://zsync.moria.org.uk/paper/ch02s04.html
    rdc := d / float64(sequenceMatches) / 8
    lrdc := int(math.Ceil(rdc))

    // enforce max and min values
    if lrdc > 4 {
        return 4
    } else {
        if lrdc < 2 {
            return 2
        } else {
            return lrdc
        }
    }
}

// [ASK] A lot of type casting happens here, not sure if it's a good practice in Go
func computeRsum(block []byte) int {
    var a int16
    var b int16
    l := len(block)
    for i := 0; i < len(block); i++ {
        val := int(unsign(block[i]))
        a += int16(val)
        b += int16(l * val)
        l--
    }
    x := int(a) << 16
    y := int(b) & 0xffff
    return int(x) | int(y)
}

func unsign(b byte) uint8 {
    if b < 0 {
        return b & 0xFF
    } else {
        return b
    }
}

func calculateMissingValues(opts zsyncOptions.Options, f *os.File) zsyncOptions.Options {
    if opts.BlockSize == 0 {
        opts.BlockSize = calculateDefaultBlockSizeForInputFile(f)
    }
    if opts.Filename == "" {
        opts.Filename = f.Name()
    }
    if opts.Url == "" {
        opts.Url = f.Name()
    }
    return opts
}

func calculateDefaultBlockSizeForInputFile(f *os.File) int {
    fileInfo, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }
    if fileInfo.Size() < 100*1<<20 {
        return BLOCK_SIZE_SMALL
    } else {
        return BLOCK_SIZE_LARGE
    }
}
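Regarding the first [ASK] above: one alternative to concatenating the header with + (just a sketch with placeholder values, not taken from my actual code) would be to write into a strings.Builder with fmt.Fprintf:

package main

import (
    "fmt"
    "strings"
)

func buildHeader(version, filename, mtime string, blockSize int, length int64,
    seqMatches, weakLen, strongLen int, url, sha1sum string) string {
    var b strings.Builder
    fmt.Fprintf(&b, "zsync: %s\n", version)
    fmt.Fprintf(&b, "Filename: %s\n", filename)
    fmt.Fprintf(&b, "MTime: %s\n", mtime)
    fmt.Fprintf(&b, "Blocksize: %d\n", blockSize)
    fmt.Fprintf(&b, "Length: %d\n", length)
    fmt.Fprintf(&b, "Hash-Lengths: %d,%d,%d\n", seqMatches, weakLen, strongLen)
    fmt.Fprintf(&b, "URL: %s\n", url)
    fmt.Fprintf(&b, "SHA-1: %s\n\n", sha1sum)
    return b.String()
}

func main() {
    // Placeholder values, only to show the resulting header format.
    fmt.Print(buildHeader("0.6.2", "example.bin", "Mon, 02 Jan 2006 15:04:05 -0700",
        2048, 1048576, 2, 3, 5, "example.bin",
        "da39a3ee5e6b4b0d3255bfef95601890afd80709"))
}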

Also, coming from a Java background, I’m used to modularizing everything, including putting this Options struct into a separate file. Am I supposed to modularize it like this?

package zsyncOptions

type Options struct {
    BlockSize int
    Filename  string
    Url       string
}

How do I archive an email without needing to select it first?

I’m using Gmail shortcuts. When navigating my inbox list I can easily move up/down using:

Newer conversation: k
Older conversation: j

This shows a blue highlight near the message I’m currently focused on:

(screenshot of the inbox list with the blue focus highlight)

How do I archive the currently highlighted email (without needing to select it first)?


I know I can hit x to select the message and then hit e to archive the selected messages, but the whole point of keyboard shortcuts is to make things as fast as possible, so I’m hoping there’s a single-key option.

I tried the following shortcut:

Archive conversation and go previous/next: ] or [

… but that only works if you first open (o) a message, meaning it’s a two-step process again.

How to automatically archive AWS S3 whole buckets to S3 Glacier?

I’m required to archive around 200 AWS S3 buckets to S3 Glacier, and I would like to do it automatically, but I can’t find out how to do it with the aws-cli.

The only method I found is through the AWS UI: go to each bucket manually and, within it, mark each directory, right-click, choose “change storage type”, and then choose Glacier.

Does anyone have any experience with this?

How can I archive old contacts out of My Contacts?

On Android (6.0), how can I archive old Google contacts so that they do not show up in My Contacts but are instead either:

  • listed on the Google Contacts website under Other Contacts, or

  • archived securely in another app/cloud storage.

Ideally, moving them from the archive back to my contacts list would be doable from a mobile app, or the archive service would have a mobile website where they are accessible.

To clarify: I don’t want them to be deleted forever or to be inaccessible on mobile.

How do I archive old copies of Messages?

I have many backups that contain versions of ~/Library/Messages. Each one contains 3 files and a directory:

XYZ:Messages user$ tree -a -L 1
.
├── Attachments
├── chat.db
├── chat.db-shm
└── chat.db-wal

1 directory, 3 files

Some of these backups may overlap with each other. The result is 10-20 folders of messages with no obvious way to access them. I would like to unify all of them and remove duplicates if they exist.

Are there any applications that do this, or any sort of tutorial I could follow? The files are SQLite, so even a specification of the database structure would be helpful.