<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://web.resource.org/cc/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns:admin="http://webns.net/mvcb/">
  <channel rdf:about="http://pinboard.in">
    <title>Pinboard (peakscale)</title>
    <link>https://pinboard.in/u:peakscale/public/</link>
    <description>recent bookmarks from peakscale</description>
    <items>
      <rdf:Seq>
	<rdf:li rdf:resource="https://blog.cloudflare.com/post-mortem-on-cloudflare-control-plane-and-analytics-outage/"/>
	<rdf:li rdf:resource="https://blog.cloudflare.com/cloudflare-incident-on-october-30-2023/"/>
	<rdf:li rdf:resource="https://engineering.atspotify.com/2022/03/incident-report-spotify-outage-on-march-8/"/>
	<rdf:li rdf:resource="https://hacks.mozilla.org/2022/02/retrospective-and-technical-details-on-the-recent-firefox-outage/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/65648/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/680587/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/656481/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/65649/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/2329B7/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/4372T8/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/41926/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/74876/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/56489/"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/17908/"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incidents/6PM5mNd43NbMqjCZ5REh"/>
	<rdf:li rdf:resource="https://engineering.fb.com/2021/10/05/networking-traffic/outage-details/"/>
	<rdf:li rdf:resource="https://www.fastly.com/blog/summary-of-june-8-outage"/>
	<rdf:li rdf:resource="https://github.blog/2021-02-02-github-availability-report-january-2021/"/>
	<rdf:li rdf:resource="https://blog.coinbase.com/incident-post-mortem-january-29-2021-5ab5247e43da"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/zall/20013#20013004"/>
	<rdf:li rdf:resource="https://aws.amazon.com/message/11201/"/>
	<rdf:li rdf:resource="https://m.signalvnoise.com/inside-a-code-red-network-edition/"/>
	<rdf:li rdf:resource="https://blog.rust-lang.org/inside-rust/2020/02/26/crates-io-incident-report.html"/>
	<rdf:li rdf:resource="https://segment.com/security/bulletins/incident090519/"/>
	<rdf:li rdf:resource="https://docs.fcc.gov/public/attachments/DOC-359134A1.pdf"/>
	<rdf:li rdf:resource="https://status.aws.amazon.com/s3-20080720.html"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/cloud-networking/18012"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/bigquery/18036#18036003"/>
	<rdf:li rdf:resource="https://labs.spotify.com/2013/06/04/incident-management-at-spotify/"/>
	<rdf:li rdf:resource="https://wiki.gentoo.org/wiki/Github/2018-06-28"/>
	<rdf:li rdf:resource="https://www.parliament.uk/documents/commons-committees/treasury/Correspondence/2017-19/visa-response-150618.pdf"/>
	<rdf:li rdf:resource="https://blog.cloudflare.com/today-we-mitigated-1-1-1-1/"/>
	<rdf:li rdf:resource="https://gocardless.com/blog/incident-review-api-and-dashboard-outage-on-10th-october/"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/cloud-networking/18009"/>
	<rdf:li rdf:resource="https://status.python.org/incidents/1y1f44q6srh2"/>
	<rdf:li rdf:resource="https://blog.travis-ci.com/2018-04-03-incident-post-mortem"/>
	<rdf:li rdf:resource="https://www.tesla.com/blog/update-last-week%E2%80%99s-accident"/>
	<rdf:li rdf:resource="https://blogs.msdn.microsoft.com/vsoservice/?p=16295"/>
	<rdf:li rdf:resource="https://githubengineering.com/ddos-incident-report/"/>
	<rdf:li rdf:resource="https://www.epicgames.com/fortnite/en-US/news/postmortem-of-service-outage-at-3-4m-ccu"/>
	<rdf:li rdf:resource="http://status.ovh.net/?do=details&amp;id=15162#comment18119"/>
	<rdf:li rdf:resource="https://status.discordapp.com/incidents/qk9cdgnqnhcn"/>
	<rdf:li rdf:resource="https://status.heroku.com/incidents/1290"/>
	<rdf:li rdf:resource="https://status.pagerduty.com/incidents/v2vrgccbtgxn"/>
	<rdf:li rdf:resource="https://blog.cloudflare.com/the-story-of-two-outages/?__s=2jqwrxa5kp26foaxfe3z"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/cloud-networking/17002"/>
	<rdf:li rdf:resource="https://blog.travis-ci.com/2017-09-20-sept-6-11-macos-outage-postmortem"/>
	<rdf:li rdf:resource="https://www.circonus.com/2017/04/postmortem-2017-04-11-firewall-outage/"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/storage/17002#5713144022302720"/>
	<rdf:li rdf:resource="https://www.theregister.co.uk/2017/07/13/watercooling_leak_killed_vnx_array/"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/appengine/17006#5737979670691840"/>
	<rdf:li rdf:resource="https://www.digitalocean.com/company/blog/update-on-the-april-5th-2017-outage/"/>
	<rdf:li rdf:resource="https://about.gitlab.com/2017/02/10/postmortem-of-database-outage-of-january-31/"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/compute/17003"/>
	<rdf:li rdf:resource="https://blog.cloudflare.com/how-and-why-the-leap-second-affected-cloudflare-dns/"/>
	<rdf:li rdf:resource="https://lukasa.co.uk/2016/11/Five_Whys_Requests_212/"/>
	<rdf:li rdf:resource="http://status.pingdom.com/incidents/73tpmc4tfzk5"/>
	<rdf:li rdf:resource="http://hub.dyn.com/dyn-blog/dyn-analysis-summary-of-friday-october-21-attack"/>
	<rdf:li rdf:resource="http://royal.pingdom.com/2016/10/24/ddos-attack-affects-pingdom/"/>
	<rdf:li rdf:resource="http://hub.dyn.com/dyn-blog/dyn-statement-on-10-21-2016-ddos-attack"/>
	<rdf:li rdf:resource="https://sandstorm.io/news/2016-09-30-fiber-bomb-debugging-story"/>
	<rdf:li rdf:resource="https://blog.travis-ci.com/2016-09-30-the-day-we-deleted-our-vm-images/"/>
	<rdf:li rdf:resource="https://status.cloud.google.com/incident/compute/16015"/>
	<rdf:li rdf:resource="http://stackstatus.net/post/147710624694/outage-postmortem-july-20-2016"/>
	<rdf:li rdf:resource="https://about.gitlab.com/2016/06/29/may-2-2016-security-release-post-mortem/"/>
	<rdf:li rdf:resource="http://aws.amazon.com/message/4372T8/"/>
	<rdf:li rdf:resource="https://users.rust-lang.org/t/crates-io-is-down-fixed/6060/3"/>
	<rdf:li rdf:resource="http://status.snowthegame.com/incidents/m8lgq1d2r7kl"/>
	<rdf:li rdf:resource="https://envygeeks.io/jekyll/2016/05/21/postmortem-a-tale-of-how-discourse-almost-took-us-out"/>
	<rdf:li rdf:resource="https://www.elastic.co/blog/elastic-cloud-outage-april-2016"/>
      </rdf:Seq>
    </items>
  </channel><item rdf:about="https://blog.cloudflare.com/post-mortem-on-cloudflare-control-plane-and-analytics-outage/">
    <title>Post Mortem on Cloudflare Control Plane and Analytics Outage</title>
    <dc:date>2023-11-04T14:02:23+00:00</dc:date>
    <link>https://blog.cloudflare.com/post-mortem-on-cloudflare-control-plane-and-analytics-outage/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:f73d214c9262/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.cloudflare.com/cloudflare-incident-on-october-30-2023/">
    <title>Cloudflare incident on October 30, 2023</title>
    <dc:date>2023-11-01T16:48:42+00:00</dc:date>
    <link>https://blog.cloudflare.com/cloudflare-incident-on-october-30-2023/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:fb81e21eddb2/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://engineering.atspotify.com/2022/03/incident-report-spotify-outage-on-march-8/">
    <title>Incident Report: Spotify Outage on March 8 : Spotify Engineering</title>
    <dc:date>2022-03-11T23:34:20+00:00</dc:date>
    <link>https://engineering.atspotify.com/2022/03/incident-report-spotify-outage-on-march-8/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:de116b0372f4/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://hacks.mozilla.org/2022/02/retrospective-and-technical-details-on-the-recent-firefox-outage/">
    <title>Retrospective and Technical Details on the recent Firefox Outage - Mozilla Hacks - the Web developer blog</title>
    <dc:date>2022-02-07T01:32:34+00:00</dc:date>
    <link>https://hacks.mozilla.org/2022/02/retrospective-and-technical-details-on-the-recent-firefox-outage/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:1f3343343bdc/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/65648/">
    <title>Summary of the Amazon EC2 and Amazon RDS Service Disruption in the US East Region</title>
    <dc:date>2021-12-08T01:19:42+00:00</dc:date>
    <link>https://aws.amazon.com/message/65648/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:714aad74d300/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/680587/">
    <title>Summary of the December 24, 2012 Amazon ELB Service Event in the US-East Region</title>
    <dc:date>2021-12-08T01:19:27+00:00</dc:date>
    <link>https://aws.amazon.com/message/680587/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:02c0cf349503/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/656481/">
    <title>Summary of the December 17th event in the South America Region (SA-EAST-1)</title>
    <dc:date>2021-12-08T01:19:19+00:00</dc:date>
    <link>https://aws.amazon.com/message/656481/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:f96aa52a2617/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/65649/">
    <title>Summary of the Amazon SimpleDB Service Disruption</title>
    <dc:date>2021-12-08T01:19:11+00:00</dc:date>
    <link>https://aws.amazon.com/message/65649/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:5cb08a06fb78/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/2329B7/">
    <title>Summary of the Amazon EC2, Amazon EBS, and Amazon RDS Service Event in the EU West Region</title>
    <dc:date>2021-12-08T01:19:03+00:00</dc:date>
    <link>https://aws.amazon.com/message/2329B7/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:84a36b8574a9/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/4372T8/">
    <title>Summary of the AWS Service Event in the Sydney Region</title>
    <dc:date>2021-12-08T01:18:53+00:00</dc:date>
    <link>https://aws.amazon.com/message/4372T8/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:9b3c253ce3d3/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/41926/">
    <title>Summary of the Amazon S3 Service Disruption in the Northern Virginia (US-EAST-1) Region</title>
    <dc:date>2021-12-08T01:18:44+00:00</dc:date>
    <link>https://aws.amazon.com/message/41926/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:2e4f62b9afe8/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/74876/">
    <title>아시아-태평양 서울 리전(AP-NorthEast-2)의 Amazon EC2 DNS 확인(Resolution) 이슈 요약</title>
    <dc:date>2021-12-08T01:18:31+00:00</dc:date>
    <link>https://aws.amazon.com/message/74876/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem dns</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:1605da2ca666/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:dns"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/56489/">
    <title>Summary of the Amazon EC2 Issues in the Asia Pacific (Tokyo) Region (AP-NORTHEAST-1)</title>
    <dc:date>2021-12-08T01:12:25+00:00</dc:date>
    <link>https://aws.amazon.com/message/56489/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:93ebe3ad981d/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/17908/">
    <title>Summary of AWS Direct Connect Event in the Tokyo (AP-NORTHEAST-1) Region</title>
    <dc:date>2021-12-08T01:12:12+00:00</dc:date>
    <link>https://aws.amazon.com/message/17908/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:6f143a79a25a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incidents/6PM5mNd43NbMqjCZ5REh">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2021-11-23T14:14:50+00:00</dc:date>
    <link>https://status.cloud.google.com/incidents/6PM5mNd43NbMqjCZ5REh</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem google networks</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:34c037d2a3f9/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://engineering.fb.com/2021/10/05/networking-traffic/outage-details/">
    <title>More details about the October 4 outage - Facebook Engineering</title>
    <dc:date>2021-10-05T18:48:40+00:00</dc:date>
    <link>https://engineering.fb.com/2021/10/05/networking-traffic/outage-details/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem networks dns</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:77d8608abb4e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:dns"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.fastly.com/blog/summary-of-june-8-outage">
    <title>Summary of June 8 outage | Fastly</title>
    <dc:date>2021-06-09T13:18:28+00:00</dc:date>
    <link>https://www.fastly.com/blog/summary-of-june-8-outage</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA[Early June 8, a customer pushed a valid configuration change that included the specific circumstances that triggered the bug, which caused 85% of our network to return errors.]]></description>
<dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:97de2b1c1c74/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://github.blog/2021-02-02-github-availability-report-january-2021/">
    <title>GitHub Availability Report: January 2021 - The GitHub Blog</title>
    <dc:date>2021-02-08T02:03:29+00:00</dc:date>
    <link>https://github.blog/2021-02-02-github-availability-report-january-2021/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:af6094736f61/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.coinbase.com/incident-post-mortem-january-29-2021-5ab5247e43da">
    <title>Incident Post Mortem: January 29, 2021 | by Coinbase | Feb, 2021 | The Coinbase Blog</title>
    <dc:date>2021-02-08T02:02:25+00:00</dc:date>
    <link>https://blog.coinbase.com/incident-post-mortem-january-29-2021-5ab5247e43da</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:ef984907a13e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/zall/20013#20013004">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2020-12-19T02:02:24+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/zall/20013#20013004</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem google</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:26bfafc4ee2b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://aws.amazon.com/message/11201/">
    <title>Summary of the Amazon Kinesis Event in the Northern Virginia (US-EAST-1) Region</title>
    <dc:date>2020-11-29T20:36:23+00:00</dc:date>
    <link>https://aws.amazon.com/message/11201/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:94d010fa31cd/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://m.signalvnoise.com/inside-a-code-red-network-edition/">
    <title>Inside a CODE RED: Network Edition - Signal v. Noise</title>
    <dc:date>2020-09-06T14:48:50+00:00</dc:date>
    <link>https://m.signalvnoise.com/inside-a-code-red-network-edition/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem networks</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:bd820d2deec4/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.rust-lang.org/inside-rust/2020/02/26/crates-io-incident-report.html">
    <title>crates.io incident report for 2020-02-20 | Inside Rust Blog</title>
    <dc:date>2020-02-28T21:42:27+00:00</dc:date>
    <link>https://blog.rust-lang.org/inside-rust/2020/02/26/crates-io-incident-report.html</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:60a274c37b8e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://segment.com/security/bulletins/incident090519/">
    <title>Security Bulletins - Security Incident</title>
    <dc:date>2019-09-19T15:31:05+00:00</dc:date>
    <link>https://segment.com/security/bulletins/incident090519/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem security</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:f9fc499a5e7e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:security"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://docs.fcc.gov/public/attachments/DOC-359134A1.pdf">
    <title>CenturyLink Network Outage Report (PDF)</title>
    <dc:date>2019-08-20T15:49:28+00:00</dc:date>
    <link>https://docs.fcc.gov/public/attachments/DOC-359134A1.pdf</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:09662eb68d96/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.aws.amazon.com/s3-20080720.html">
    <title>AWS Service Health Dashboard - Amazon S3 Availability Event: July 20, 2008</title>
    <dc:date>2018-12-13T13:52:00+00:00</dc:date>
    <link>https://status.aws.amazon.com/s3-20080720.html</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["

We've now determined that message corruption was the cause of the server-to-server communication problems. More specifically, we found that there were a handful of messages on Sunday morning that had a single bit corrupted such that the message was still intelligible, but the system state information was incorrect. "]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:5a9a76367c4e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/cloud-networking/18012">
    <title>Google Cloud - issue with Google Cloud Global Loadbalancers returning 502s</title>
    <dc:date>2018-07-19T16:20:30+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/cloud-networking/18012</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA[
On Tuesday, 17 July 2018, from 12:17 to 12:49 PDT, Google Cloud HTTP(S) Load Balancers returned 502s for some requests they received. The proportion of 502 return codes varied from 33% to 87% during the period. Automated monitoring alerted Google’s engineering team to the event at 12:19, and at 12:44 the team had identified the probable root cause and deployed a fix.]]></description>
<dc:subject>google postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:6c0ef571560e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/bigquery/18036#18036003">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2018-07-06T21:13:18+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/bigquery/18036#18036003</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Configuration changes being rolled out on the evening of the incident were not applied in the intended order. This resulted in an incomplete configuration change becoming live in some zones, subsequently triggering the failure of customer jobs. During the process of rolling back the configuration, another incorrect configuration change was inadvertently applied, causing the second batch of job failures."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:ea644663ca1b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://labs.spotify.com/2013/06/04/incident-management-at-spotify/">
    <title>Incident Management at Spotify | Labs</title>
    <dc:date>2018-07-06T21:11:50+00:00</dc:date>
    <link>https://labs.spotify.com/2013/06/04/incident-management-at-spotify/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["A few weeks ago Spotify had one of the biggest incidents in the last few years. It caused a major outage for a big chunk of our European users. For a few hours the music playback experience was damaged. Our users would see high latency when playing music and some of them were unable to log in.
[...]
Two months before the big outage we had an incident connected with one of our smallest backend services: Popcount. Popcount (this is our internal name) is the service that takes care of storing the list of subscribers for each of our more than 1 billion playlists."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:7f924ca7947b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://wiki.gentoo.org/wiki/Github/2018-06-28">
    <title>Github/2018-06-28 - Gentoo Wiki</title>
    <dc:date>2018-07-05T05:08:43+00:00</dc:date>
    <link>https://wiki.gentoo.org/wiki/Github/2018-06-28</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["

An unknown entity gained control of an admin account for the Gentoo GitHub Organization and removed all access to the organization (and its repositories) from Gentoo developers. They then proceeded to make various changes to content. Gentoo Developers & Infrastructure escalated to GitHub support and the Gentoo Organization was frozen by GitHub staff. Gentoo has regained control of the Gentoo GitHub Organization and has reverted the bad commits and defaced content. "]]></description>
<dc:subject>postmortem security</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:53d48e1e36b9/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:security"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.parliament.uk/documents/commons-committees/treasury/Correspondence/2017-19/visa-response-150618.pdf">
    <title>Visa letter to parliament</title>
    <dc:date>2018-06-25T03:56:19+00:00</dc:date>
    <link>https://www.parliament.uk/documents/commons-committees/treasury/Correspondence/2017-19/visa-response-150618.pdf</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:4ffe84782c45/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.cloudflare.com/today-we-mitigated-1-1-1-1/">
    <title>Today we mitigated 1.1.1.1</title>
    <dc:date>2018-06-01T13:04:58+00:00</dc:date>
    <link>https://blog.cloudflare.com/today-we-mitigated-1-1-1-1/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Today, in an effort to reclaim some technical debt, we deployed new code that introduced Gatebot to Provision API.

What we did not account for, and what Provision API didn’t know about, was that 1.1.1.0/24 and 1.0.0.0/24 are special IP ranges. Frankly speaking, almost every IP range is "special" for one reason or another, since our IP configuration is rather complex. But our recursive DNS resolver ranges are even more special: they are relatively new, and we're using them in a very unique way. Our hardcoded list of Cloudflare addresses contained a manual exception specifically for these ranges.

As you might be able to guess by now, we didn't implement this manual exception while we were doing the integration work. Remember, the whole idea of the fix was to remove the hardcoded gotchas!"]]></description>
<dc:subject>postmortem security networks</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:a88cc8097bf3/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:security"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://gocardless.com/blog/incident-review-api-and-dashboard-outage-on-10th-october/">
    <title>Incident review: API and Dashboard outage on 10 October 2017 — GoCardless Blog</title>
    <dc:date>2018-05-26T22:10:33+00:00</dc:date>
    <link>https://gocardless.com/blog/incident-review-api-and-dashboard-outage-on-10th-october/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["On the afternoon of 10 October 2017, we experienced an outage of our API and Dashboard, lasting 1 hour and 50 minutes. Any requests made during that time failed, and returned an error.

The cause of the incident was a hardware failure on our primary database node, combined with unusual circumstances that prevented our database cluster automation from promoting one of the replica database nodes to act as the new primary.

This failure to promote a new primary database node extended an outage that would normally last 1 or 2 minutes to one that lasted almost 2 hours."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:a3e5b75febe6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/cloud-networking/18009">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2018-05-24T15:49:13+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/cloud-networking/18009</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["On Wednesday 16 May 2018, Google Cloud Networking experienced loss of connectivity to external IP addresses located in us-east4 for a duration of 58 minutes."]]></description>
<dc:subject>postmortem google</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:753637894b90/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.python.org/incidents/1y1f44q6srh2">
    <title>Python Infrastructure Status - Next Generation PyPI Rollout</title>
    <dc:date>2018-04-17T02:43:56+00:00</dc:date>
    <link>https://status.python.org/incidents/1y1f44q6srh2</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:2ba20f5c333e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.travis-ci.com/2018-04-03-incident-post-mortem">
    <title>The Travis CI Blog: Incident Post-Mortem and Security Advisory: Data Exposure After travis-ci.com Outage</title>
    <dc:date>2018-04-09T04:24:44+00:00</dc:date>
    <link>https://blog.travis-ci.com/2018-04-03-incident-post-mortem</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:f504bd0429d7/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.tesla.com/blog/update-last-week%E2%80%99s-accident">
    <title>An Update on Last Week’s Accident | Tesla</title>
    <dc:date>2018-03-31T04:22:10+00:00</dc:date>
    <link>https://www.tesla.com/blog/update-last-week%E2%80%99s-accident</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:d391ff5211f6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blogs.msdn.microsoft.com/vsoservice/?p=16295">
    <title>Updated and Completed Postmortem: Performance Issues and failures in VSTS West Europe – 7 February 2018 – Service Blog – Visual Studio Team Services</title>
    <dc:date>2018-03-03T14:36:27+00:00</dc:date>
    <link>https://blogs.msdn.microsoft.com/vsoservice/?p=16295</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:9a7bc4eec904/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://githubengineering.com/ddos-incident-report/">
    <title>February 28th DDoS Incident Report | GitHub Engineering</title>
    <dc:date>2018-03-02T05:40:15+00:00</dc:date>
    <link>https://githubengineering.com/ddos-incident-report/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["On Wednesday, February 28, 2018 GitHub.com was unavailable from 17:21 to 17:26 UTC and intermittently unavailable from 17:26 to 17:30 UTC due to a distributed denial-of-service (DDoS) attack."]]></description>
<dc:subject>postmortem security</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:761107690447/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:security"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.epicgames.com/fortnite/en-US/news/postmortem-of-service-outage-at-3-4m-ccu">
    <title>Epic Games' Fortnite</title>
    <dc:date>2018-02-09T17:24:18+00:00</dc:date>
    <link>https://www.epicgames.com/fortnite/en-US/news/postmortem-of-service-outage-at-3-4m-ccu</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Fortnite hit a new peak of 3.4 million concurrent players last Sunday… and that didn’t come without issues! The extreme load caused 6 different incidents between Saturday and Sunday, with a mix of partial and total service disruptions to Fortnite."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:ce2861689c39/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://status.ovh.net/?do=details&amp;id=15162#comment18119">
    <title>OVH outage</title>
    <dc:date>2017-11-11T22:37:41+00:00</dc:date>
    <link>http://status.ovh.net/?do=details&amp;id=15162#comment18119</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:5f9e7b419d2b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.discordapp.com/incidents/qk9cdgnqnhcn">
    <title>Discord Status - Unavailable Guilds &amp; Connection Issues</title>
    <dc:date>2017-10-17T10:06:41+00:00</dc:date>
    <link>https://status.discordapp.com/incidents/qk9cdgnqnhcn</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:1f6abf709f95/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.heroku.com/incidents/1290">
    <title>Incident 1290 | Heroku Status</title>
    <dc:date>2017-10-09T16:51:13+00:00</dc:date>
    <link>https://status.heroku.com/incidents/1290</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["The routing layer that directs traffic from the internet to customer dynos has an extremely slow memory leak that has existed for some time. Typically, this memory leak has been mitigated by regular deploys of the router. Recently, however, deploys to this component have been less frequent. At some point, we crossed a tipping point and the memory leak was no longer automatically remediated by ongoing deployments.

This memory leak caused the processes in the routing layer to be killed and restarted. During this period of the processes restarting, they were unable to receive traffic and connections received EOF responses."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:d19b7b5407dd/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.pagerduty.com/incidents/v2vrgccbtgxn">
    <title>PagerDuty Status - Delayed Notifications</title>
    <dc:date>2017-10-09T16:44:37+00:00</dc:date>
    <link>https://status.pagerduty.com/incidents/v2vrgccbtgxn</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Degraded performance of one of our Cassandra database clusters caused delays outside tolerance limits to the delivery of notifications and the dispatching of webhooks. The degradation in performance was triggered during the replacement of a failed virtual machine in the cluster. This maintenance was unplanned, as the failure of the host was unexpected.

The procedure used to replace the failed node triggered a chain reaction of load on other nodes in the cluster, which hampered this cluster’s ability to do its primary job of processing notifications."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:df97c3d38577/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.cloudflare.com/the-story-of-two-outages/?__s=2jqwrxa5kp26foaxfe3z">
    <title>The Story of Two Outages</title>
    <dc:date>2017-09-30T18:13:40+00:00</dc:date>
    <link>https://blog.cloudflare.com/the-story-of-two-outages/?__s=2jqwrxa5kp26foaxfe3z</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem networks</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:f5e0a0abfce6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/cloud-networking/17002">
    <title>Google Cloud Networking Incident #17002</title>
    <dc:date>2017-09-22T12:42:27+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/cloud-networking/17002</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Any GCE instance that was live-migrated between 13:56 PDT on Tuesday 29 August 2017 and 08:32 on Wednesday 30 August 2017 became unreachable via Google Cloud Network or Internal Load Balancing until between 08:56 and 14:18 (for regions other than us-central1) or 20:16 (for us-central1) on Wednesday. See https://goo.gl/NjqQ31 for a visual representation of the cumulative number of instances live-migrated over time.

Our internal investigation shows that, at peak, 2% of GCE instances were affected by the issue."]]></description>
<dc:subject>google postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:d52e71daff9e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.travis-ci.com/2017-09-20-sept-6-11-macos-outage-postmortem">
    <title>The Travis CI Blog: Sept 6 - 11 macOS outage postmortem</title>
    <dc:date>2017-09-21T12:35:24+00:00</dc:date>
    <link>https://blog.travis-ci.com/2017-09-20-sept-6-11-macos-outage-postmortem</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:450ef56d7b91/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.circonus.com/2017/04/postmortem-2017-04-11-firewall-outage/">
    <title>Postmortem: 2017-04-11 Firewall Outage | Circonus</title>
    <dc:date>2017-08-04T13:22:26+00:00</dc:date>
    <link>https://www.circonus.com/2017/04/postmortem-2017-04-11-firewall-outage/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["We use a pair of firewall devices in an active/passive configuration with automatic failover should one of the devices become unresponsive. The firewall device in question went down, and automatic failover did not trigger for an unknown reason (we are still investigating). When we realized the problem, we killed off the bad firewall device, causing the secondary to promote itself to master and service to be restored."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:7bd81436f505/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/storage/17002#5713144022302720">
    <title>Requests to Google Cloud Storage (GCS) JSON API experienced elevated error rates for a period of 3 hours and 15 minutes</title>
    <dc:date>2017-07-17T14:50:02+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/storage/17002#5713144022302720</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["A low-level software defect in an internal API service that handles GCS JSON requests caused infrequent memory-related process terminations. These process terminations increased as a result of a large volume in requests to the GCS Transfer Service, which uses the same internal API service as the GCS JSON API. This caused an increased rate of 503 responses for GCS JSON API requests for 3.25 hours."]]></description>
<dc:subject>postmortem google</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:5668032328ea/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.theregister.co.uk/2017/07/13/watercooling_leak_killed_vnx_array/">
    <title>What did OVH learn from 24-hour outage? Water and servers do not mix</title>
    <dc:date>2017-07-17T14:49:16+00:00</dc:date>
    <link>https://www.theregister.co.uk/2017/07/13/watercooling_leak_killed_vnx_array/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA[Including an article because the original incident log is in French.]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:6ecf048a6888/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/appengine/17006#5737979670691840">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2017-06-21T03:19:43+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/appengine/17006#5737979670691840</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["At the time of incident, Google engineers were upgrading the network topology and capacity of the region; a configuration error caused the existing links to be decommissioned before the replacement links could provide connectivity, resulting in a loss of connectivity for the asia-northeast1 region. Although the replacement links were already commissioned and appeared to be ready to serve, a network-routing protocol misconfiguration meant that the routes through those links were not able to carry traffic."
]]></description>
<dc:subject>postmortem google</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:1315f491eead/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.digitalocean.com/company/blog/update-on-the-april-5th-2017-outage/">
    <title>Update on the April 5th, 2017 Outage</title>
    <dc:date>2017-04-11T13:13:02+00:00</dc:date>
    <link>https://www.digitalocean.com/company/blog/update-on-the-april-5th-2017-outage/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Within three minutes of the initial alerts, we discovered that our primary database had been deleted. Four minutes later we commenced the recovery process, using one of our time-delayed database replicas. Over the next four hours, we copied and restored the data to our primary and secondary replicas. The duration of the outage was due to the time it took to copy the data between the replicas and restore it into an active server."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:61de3f5a49c7/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://about.gitlab.com/2017/02/10/postmortem-of-database-outage-of-january-31/">
    <title>Postmortem of database outage of January 31 | GitLab</title>
    <dc:date>2017-02-11T03:56:27+00:00</dc:date>
    <link>https://about.gitlab.com/2017/02/10/postmortem-of-database-outage-of-january-31/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:abee83edf554/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/compute/17003">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2017-02-10T18:25:58+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/compute/17003</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["On Monday 30 January 2017, newly created Google Compute Engine instances, Cloud VPNs and network load balancers were unavailable for a duration of 2 hours 8 minutes."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:ddfa7dbbaacb/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.cloudflare.com/how-and-why-the-leap-second-affected-cloudflare-dns/">
    <title>How and why the leap second affected Cloudflare DNS</title>
    <dc:date>2017-01-03T03:49:04+00:00</dc:date>
    <link>https://blog.cloudflare.com/how-and-why-the-leap-second-affected-cloudflare-dns/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:b6211a366a5c/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://lukasa.co.uk/2016/11/Five_Whys_Requests_212/">
    <title>Five Whys on Requests 2.12</title>
    <dc:date>2016-11-30T23:36:31+00:00</dc:date>
    <link>https://lukasa.co.uk/2016/11/Five_Whys_Requests_212/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:3636bd0c4c5d/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://status.pingdom.com/incidents/73tpmc4tfzk5">
    <title>Pingdom Status - Problems with my Pingdom</title>
    <dc:date>2016-11-14T03:23:49+00:00</dc:date>
    <link>http://status.pingdom.com/incidents/73tpmc4tfzk5</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:d94cc80bbae1/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://hub.dyn.com/dyn-blog/dyn-analysis-summary-of-friday-october-21-attack">
    <title>Dyn Analysis Summary Of Friday October 21 Attack</title>
    <dc:date>2016-10-26T22:04:50+00:00</dc:date>
    <link>http://hub.dyn.com/dyn-blog/dyn-analysis-summary-of-friday-october-21-attack</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem networks</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:8b83f9fdf5e5/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://royal.pingdom.com/2016/10/24/ddos-attack-affects-pingdom/">
    <title>How the Friday DDoS attack affected Pingdom - Pingdom Royal</title>
    <dc:date>2016-10-24T15:10:17+00:00</dc:date>
    <link>http://royal.pingdom.com/2016/10/24/ddos-attack-affects-pingdom/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem networks</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:19a40e37e541/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://hub.dyn.com/dyn-blog/dyn-statement-on-10-21-2016-ddos-attack">
    <title>Dyn Statement on 10/21/2016 DDoS Attack</title>
    <dc:date>2016-10-22T23:09:44+00:00</dc:date>
    <link>http://hub.dyn.com/dyn-blog/dyn-statement-on-10-21-2016-ddos-attack</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem security networks</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:d5d6e0a10dc9/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:security"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://sandstorm.io/news/2016-09-30-fiber-bomb-debugging-story">
    <title>The Mysterious Fiber Bomb Problem: A Debugging Story - Sandstorm Blog</title>
    <dc:date>2016-10-13T07:52:01+00:00</dc:date>
    <link>https://sandstorm.io/news/2016-09-30-fiber-bomb-debugging-story</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:7f11b39af2b6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://blog.travis-ci.com/2016-09-30-the-day-we-deleted-our-vm-images/">
    <title>The Travis CI Blog: The day we deleted our VM images</title>
    <dc:date>2016-09-30T19:46:11+00:00</dc:date>
    <link>https://blog.travis-ci.com/2016-09-30-the-day-we-deleted-our-vm-images/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["In addition, our cleanup service had been briefly disabled to troubleshoot a potential race condition. Then we turned the automated cleanup back on. The service had a default hard coded amount of how many image names to query from our internal image catalog and it was set to 100.

When we started the cleanup service, the list of 100 image names, sorted by newest first, did not include our stable images, which were the oldest. Our cleanup service then promptly started deleting the older images from GCE, because its view of the world told it that those older images were no longer in use, i.e. it looked like they were not in our catalog, and all of our stable images got irrevocably deleted.

This immediately stopped builds from running. "]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:0346e46291ef/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://status.cloud.google.com/incident/compute/16015">
    <title>Google Cloud Status Dashboard</title>
    <dc:date>2016-08-09T17:19:30+00:00</dc:date>
    <link>https://status.cloud.google.com/incident/compute/16015</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["While removing a faulty router from service, a new procedure for diverting traffic from the router was used. This procedure applied a new configuration that resulted in announcing some Google Cloud Platform IP addresses from a single point of presence in the southwestern US. As these announcements were highly specific they took precedence over the normal routes to Google's network and caused a substantial proportion of traffic for the affected network ranges to be directed to this one point of presence. This misrouting directly caused the additional latency some customers experienced.

Additionally this misconfiguration sent affected traffic to next-generation infrastructure that was undergoing testing. This new infrastructure was not yet configured to handle Cloud Platform traffic and applied an overly-restrictive packet filter."]]></description>
<dc:subject>postmortem google</dc:subject>
<dc:identifier>https://pinboard.in/u:peakscale/b:a894387c2953/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:google"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://stackstatus.net/post/147710624694/outage-postmortem-july-20-2016">
    <title>Stack Exchange Network Status — Outage Postmortem - July 20, 2016</title>
    <dc:date>2016-07-20T20:53:26+00:00</dc:date>
    <link>http://stackstatus.net/post/147710624694/outage-postmortem-july-20-2016</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["The direct cause was a malformed post that caused one of our regular expressions to consume high CPU on our web servers. The post was in the homepage list, and that caused the expensive regular expression to be called on each home page view. "]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:1c8331ef47c3/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://about.gitlab.com/2016/06/29/may-2-2016-security-release-post-mortem/">
    <title>May 2, 2016 Security Release Post-Mortem | GitLab</title>
    <dc:date>2016-07-11T18:11:37+00:00</dc:date>
    <link>https://about.gitlab.com/2016/06/29/may-2-2016-security-release-post-mortem/</link>
    <dc:creator>peakscale</dc:creator><dc:subject>postmortem security</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:2d58ee825398/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:security"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://aws.amazon.com/message/4372T8/">
    <title>Summary of the AWS Service Event in the Sydney Region</title>
    <dc:date>2016-06-09T16:52:45+00:00</dc:date>
    <link>http://aws.amazon.com/message/4372T8/</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["The service disruption primarily affected EC2 instances and their associated Elastic Block Store (“EBS”) volumes running in a single Availability Zone. "]]></description>
<dc:subject>aws postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:f6f86b90e278/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:aws"/>
	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://users.rust-lang.org/t/crates-io-is-down-fixed/6060/3">
    <title>Crates.io is down [fixed] - The Rust Programming Language Forum</title>
    <dc:date>2016-06-01T19:28:59+00:00</dc:date>
    <link>https://users.rust-lang.org/t/crates-io-is-down-fixed/6060/3</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA[OK, a quick post-mortem:

At 9:45 AM PST I got a ping that crates.io was down and started looking into it. Connections via the website and from the 'cargo' command were timing out. From Heroku's logs it looks like the timeouts began around 9:10 AM.

From looking at logs it's clear that connections were timing out, and that a number of postgres queries were blocked updating the download statistics. These queries were occupying all available connections.

After killing outstanding queries the site is working again. It's not clear yet what the original cause was.]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:d7d54d49ed33/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="http://status.snowthegame.com/incidents/m8lgq1d2r7kl">
    <title>SNOW Status - Elevated Errors on SNOW Backend</title>
    <dc:date>2016-05-27T09:30:09+00:00</dc:date>
    <link>http://status.snowthegame.com/incidents/m8lgq1d2r7kl</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["Today's outage was because of a mis-configuration in our Redis cluster, where we didn't automatically prune stale cache keys."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:d706d6c1cc2d/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://envygeeks.io/jekyll/2016/05/21/postmortem-a-tale-of-how-discourse-almost-took-us-out">
    <title>Postmortem: A tale of how Discourse almost took us out.</title>
    <dc:date>2016-05-27T09:29:41+00:00</dc:date>
    <link>https://envygeeks.io/jekyll/2016/05/21/postmortem-a-tale-of-how-discourse-almost-took-us-out</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["TL;DR

This morning we noticed that Sidekiq had 13K jobs, it quickly escalated to 14K and then 17K and kept growing, for reasons we do not understand yet. We know this was initially cause by a large backlog of emails that needed to be sent because of exceptions that were occurring due to this bug, this is when things got interesting, and got wildly out of control."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:c7b5740b2a12/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.elastic.co/blog/elastic-cloud-outage-april-2016">
    <title>Elastic Cloud Outage: Root Cause and Impact Analysis | Elastic</title>
    <dc:date>2016-05-06T20:08:19+00:00</dc:date>
    <link>https://www.elastic.co/blog/elastic-cloud-outage-april-2016</link>
    <dc:creator>peakscale</dc:creator><description><![CDATA["What happened behind the scenes was that our Apache ZooKeeper cluster lost quorum, for the first time in more than three years. After recent maintenance, a heap space misconfiguration on the new nodes resulted in high memory pressure on the ZooKeeper quorum nodes, causing ZooKeeper to spend almost all CPU garbage collecting. When an auxiliary service that watches a lot of the ZooKeeper database reconnected, this threw ZooKeeper over the top, which in turn caused other services to reconnect – resulting in a thundering herd effect that exacerbated the problem."]]></description>
<dc:subject>postmortem</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:peakscale/b:00938bea5186/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:peakscale/t:postmortem"/>
</rdf:Bag></taxo:topics>
</item>
</rdf:RDF>