path | concatenated_notebook
---|---|
notebooks/foundation-marketing-analytics/week-3-managerial-rfm-segmentation.ipynb | ###Markdown
RFM Managerial Segmentation: managerial segmentation is simple and based on rules rather than ML or statistical models.![Rules](rfm-seg-rules.png)
###Code
import java.util.concurrent.TimeUnit
import scala.collection.mutable.ListBuffer
import org.apache.spark.sql.Column
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
val schema = StructType(
List(
StructField("customer_id", StringType, false),
StructField("purchase_amount", DoubleType, false),
StructField("date_of_purchase", DateType, false)
)
)
val data = spark.read
.option("sep", "\t")
.option("mode","FAILFAST")
.option("dateFormat","YYYY-MM-dd")
.schema(schema)
.csv("../../data/foundation-marketing-analytics/purchases.txt")
.toDF
def enrich(in:DataFrame, dataBaseInvoiceDate: Column) : DataFrame = {
in
.withColumn("end_date", dataBaseInvoiceDate)
.withColumn("year_of_purchase", year($"date_of_purchase"))
.withColumn("days_since", datediff($"end_date", $"date_of_purchase"))
}
val enriched1 = enrich(data, lit("2016-01-01"))
enriched1.printSchema()
val OneYear = 365
val TwoYears = OneYear * 2
val ThreeYears = OneYear * 3
def calcRFM(in:DataFrame) : DataFrame = {
in
.groupBy($"customer_id")
.agg(
max($"days_since").alias("first_purchase"),
min($"days_since").alias("recency"),
count($"*").alias("frequency"),
avg($"purchase_amount").alias("amount"))
}
val enriched2 = calcRFM(enriched1)
enriched2.filter($"customer_id".isin("10", "90")).show(5)
###Output
+-----------+--------------+-------+---------+------+
|customer_id|first_purchase|recency|frequency|amount|
+-----------+--------------+-------+---------+------+
| 90| 3783| 758| 10| 115.8|
| 10| 3829| 3829| 1| 30.0|
+-----------+--------------+-------+---------+------+
###Markdown
First level segmentation: calculates only the first-level segmentation.
###Code
def firstLevelSegmentation(in:DataFrame):DataFrame = {
in
.withColumn("segment1",
when($"recency" > ThreeYears, "inactive")
.when($"recency" > TwoYears && $"recency" <= ThreeYears, "cold")
.when($"recency" > OneYear && $"recency" <= TwoYears, "warm")
.otherwise("active"))
}
val segment1Level = firstLevelSegmentation(enriched2)
segment1Level.groupBy($"segment1").count().show()
segment1Level.show()
###Output
+--------+-----+
|segment1|count|
+--------+-----+
| warm| 1958|
| active| 5398|
| cold| 1903|
|inactive| 9158|
+--------+-----+
+-----------+--------------+-------+---------+------------------+--------+
|customer_id|first_purchase|recency|frequency| amount|segment1|
+-----------+--------------+-------+---------+------------------+--------+
| 6240| 3752| 3005| 3| 76.66666666666667|inactive|
| 52800| 3320| 3320| 1| 15.0|inactive|
| 100140| 2750| 13| 4| 51.25| active|
| 109180| 2616| 30| 8| 48.75| active|
| 131450| 2228| 205| 8| 103.75| active|
| 45300| 3667| 234| 6|29.166666666666668| active|
| 69460| 3179| 15| 9| 28.88888888888889| active|
| 86180| 2975| 2| 9| 21.11111111111111| active|
| 161110| 1528| 1528| 1| 30.0|inactive|
| 60070| 3288| 2074| 3|51.666666666666664|inactive|
| 13610| 3657| 1307| 8| 3043.75|inactive|
| 100010| 2750| 413| 7|27.857142857142858| warm|
| 107930| 2626| 150| 5| 79.0| active|
| 132610| 2211| 30| 7|28.571428571428573| active|
| 154770| 427| 427| 1| 45.0| warm|
| 49290| 3493| 371| 5| 24.0| warm|
| 229650| 419| 419| 1| 45.0| warm|
| 220290| 623| 366| 2| 40.0| warm|
| 178550| 1247| 3| 8| 56.25| active|
| 112480| 2577| 1890| 3|38.333333333333336|inactive|
+-----------+--------------+-------+---------+------------------+--------+
only showing top 20 rows
###Markdown
Second level segmentation: calculates ONLY the 2nd-level segmentation.
###Code
//Make sure that the conditions for "warm new" and "active new" come earlier than the other conditions for their
//respective categories, to get accurate results
def secondLevelSegmentation(in:DataFrame) :DataFrame = {
in
.withColumn("segment2",
when($"segment1" === lit("warm") && $"first_purchase" <= TwoYears, "warm new")
.when($"segment1" === lit("warm") && $"amount" >= 100, "warm high value")
.when($"segment1" === lit("warm") && $"amount" < 100, "warm low value")
.when($"segment1" === lit("active") && $"first_purchase" <= OneYear, "active new")
.when($"segment1" === lit("active") && $"amount" >= 100, "active high value")
.when($"segment1" === lit("active") && $"amount" < 100, "active low value"))
}
val segment2Level = secondLevelSegmentation(segment1Level)
segment2Level.groupBy($"segment2").count().show()
segment2Level.show()
val cols = segment1Level.schema.fieldNames.map(col(_))
cols.foreach(println)
def segmentation(segment1Level:DataFrame, segment2Level:DataFrame) :DataFrame = {
segment1Level
.join(segment2Level, segment1Level("customer_id") === segment2Level("customer_id"), "inner")
.select(segment1Level("customer_id"),
segment1Level("first_purchase"),
segment1Level("recency"),
segment1Level("frequency"),
segment1Level("amount"),
segment1Level("segment1"),
segment2Level("segment2"))
.withColumn("segment", when(segment2Level("segment2").isNotNull, $"segment2").otherwise(segment1Level("segment1")))
.orderBy("segment")
}
val segmented = segmentation(segment1Level, segment2Level)
//Cache to simplify subsequent calculations
segmented.cache()
segmented.groupBy($"segment").count().show()
segmented.show()
###Output
customer_id
first_purchase
recency
frequency
amount
segment1
+-----------------+-----+
| segment|count|
+-----------------+-----+
|active high value| 573|
| active low value| 3313|
| active new| 1512|
| cold| 1903|
| inactive| 9158|
| warm high value| 119|
| warm low value| 901|
| warm new| 938|
+-----------------+-----+
+-----------+--------------+-------+---------+------------------+--------+-----------------+-----------------+
|customer_id|first_purchase|recency|frequency| amount|segment1| segment2| segment|
+-----------+--------------+-------+---------+------------------+--------+-----------------+-----------------+
| 131450| 2228| 205| 8| 103.75| active|active high value|active high value|
| 189280| 1106| 1| 3| 100.0| active|active high value|active high value|
| 170050| 1520| 13| 2| 100.0| active|active high value|active high value|
| 232050| 387| 44| 2| 250.0| active|active high value|active high value|
| 100580| 2739| 119| 11|104.54545454545455| active|active high value|active high value|
| 9630| 3801| 105| 11|116.36363636363636| active|active high value|active high value|
| 227630| 426| 74| 2| 100.0| active|active high value|active high value|
| 176240| 1280| 160| 4| 160.0| active|active high value|active high value|
| 18570| 3893| 342| 13|119.23076923076923| active|active high value|active high value|
| 141480| 2032| 69| 7|146.42857142857142| active|active high value|active high value|
| 132350| 2214| 71| 7|185.71428571428572| active|active high value|active high value|
| 175280| 1282| 71| 5| 105.0| active|active high value|active high value|
| 189110| 1107| 248| 3|103.33333333333333| active|active high value|active high value|
| 124350| 2360| 232| 6| 426.6666666666667| active|active high value|active high value|
| 143220| 2004| 34| 6|101.66666666666667| active|active high value|active high value|
| 115440| 2557| 1| 7|105.71428571428571| active|active high value|active high value|
| 233700| 367| 1| 2| 100.0| active|active high value|active high value|
| 93790| 2857| 261| 11| 560.6272727272727| active|active high value|active high value|
| 215510| 731| 20| 3| 500.0| active|active high value|active high value|
| 182630| 1163| 30| 4| 150.0| active|active high value|active high value|
+-----------+--------------+-------+---------+------------------+--------+-----------------+-----------------+
only showing top 20 rows
###Markdown
**NOTE: We could combine the calculation of both the 1st and 2nd level segments into one code base, but separating them simplifies testing and maintenance.** Profile of each segment for 2015
###Code
def segmentProfile(segmented: DataFrame, segColName: String) :DataFrame = {
segmented
.groupBy(col(segColName))
.agg(
round(avg($"recency"),2).alias("avg_r"),
round(avg($"frequency"),2).alias("avg_f"),
round(avg($"amount"),2).alias("avg_a"))
.orderBy(col(segColName))
}
segmentProfile(segmented, "segment").show(10, truncate=false)
###Output
_____no_output_____
###Markdown
Segment a Database Retrospectively, i.e. 2014. That is, the segmentation of the database as if we were a **year ago**. **How does it work?** The first thing to do is to remember that we are a year ago, meaning that whatever data we take into account, anything that has happened over the last 365 days should be discarded. We go back in time and assume the data generated over the last year (the last period) did not even exist, adapt how we compute recency, frequency and monetary value accordingly, and then apply everything we have applied before: same segmentation, same transformation, same analyses, and same tables. **Why do we need to segment retrospectively?** From a managerial point of view, it is extremely useful to see not only to what extent each segment contributes to today's revenues, but also to what extent each segment today would likely contribute to tomorrow's revenues.
###Code
val customers2015 = segmented
val enriched2014 = enrich(data.filter(year($"date_of_purchase") <= 2014), lit("2015-01-01"))
val customers2014 = calcRFM(enriched2014)
val first = firstLevelSegmentation(customers2014)
segmentProfile(first, "segment1").show(10, truncate=false)
val second = secondLevelSegmentation(first)
segmentProfile(second, "segment2").show(10, truncate=false)
val segmented2014 = segmentation(first, second)
customers2015.printSchema()
customers2014.printSchema()
segmented2014.printSchema()
println("# of customers 2015: "+ customers2015.count())
println("# of customers 2014: "+ customers2014.count())
segmentProfile(segmented2014, "segment").show(10, truncate=false)
###Output
+--------+-------+-----+-----+
|segment1|avg_r |avg_f|avg_a|
+--------+-------+-----+-----+
|active |106.78 |4.31 |70.37|
|cold |866.62 |2.25 |51.11|
|inactive|2058.44|1.73 |48.11|
|warm |484.59 |2.58 |52.14|
+--------+-------+-----+-----+
+-----------------+-------+-----+------+
|segment2 |avg_r |avg_f|avg_a |
+-----------------+-------+-----+------+
|null |1792.95|1.85 |48.78 |
|active high value|85.34 |5.7 |261.9 |
|active low value |98.09 |5.63 |40.46 |
|active new |132.09 |1.07 |69.73 |
|warm high value |461.2 |4.41 |187.85|
|warm low value |470.66 |4.36 |37.38 |
|warm new |497.32 |1.06 |51.37 |
+-----------------+-------+-----+------+
root
|-- customer_id: string (nullable = true)
|-- first_purchase: integer (nullable = true)
|-- recency: integer (nullable = true)
|-- frequency: long (nullable = false)
|-- amount: double (nullable = true)
|-- segment1: string (nullable = false)
|-- segment2: string (nullable = true)
|-- segment: string (nullable = true)
root
|-- customer_id: string (nullable = true)
|-- first_purchase: integer (nullable = true)
|-- recency: integer (nullable = true)
|-- frequency: long (nullable = false)
|-- amount: double (nullable = true)
root
|-- customer_id: string (nullable = true)
|-- first_purchase: integer (nullable = true)
|-- recency: integer (nullable = true)
|-- frequency: long (nullable = false)
|-- amount: double (nullable = true)
|-- segment1: string (nullable = false)
|-- segment2: string (nullable = true)
|-- segment: string (nullable = true)
# of customers 2015: 18417
# of customers 2014: 16905
+-----------------+-------+-----+------+
|segment |avg_r |avg_f|avg_a |
+-----------------+-------+-----+------+
|active high value|85.34 |5.7 |261.9 |
|active low value |98.09 |5.63 |40.46 |
|active new |132.09 |1.07 |69.73 |
|cold |866.62 |2.25 |51.11 |
|inactive |2058.44|1.73 |48.11 |
|warm high value |461.2 |4.41 |187.85|
|warm low value |470.66 |4.36 |37.38 |
|warm new |497.32 |1.06 |51.37 |
+-----------------+-------+-----+------+
###Markdown
Revenue Generation Per Segment
###Code
//Compute how much revenue is generated by each segment in 2015
//Notice that people with no revenue in 2015 do NOT appear
//i.e. we select only active customers
val revenue2015 = enriched1
.filter($"year_of_purchase" === 2015)
.groupBy($"customer_id")
.agg(sum($"purchase_amount").alias("revenue_2015"))
revenue2015.describe("revenue_2015").show()
###Output
+-------+------------------+
|summary| revenue_2015|
+-------+------------------+
| count| 5398|
| mean| 88.62432938125232|
| stddev|224.35689735796478|
| min| 5.0|
| max| 4500.0|
+-------+------------------+
###Markdown
Show avg. revenue per customer and per segment for 2015
###Code
//We need to do a left join so that we keep the customers who didn't generate revenue in 2015, i.e. didn't
//make any purchases in 2015
val actuals = customers2015
.join(revenue2015, Seq("customer_id"), "left")
.na
.fill(0.0, Seq("revenue_2015"))
println("No of rows: "+actuals.count())
actuals.describe("revenue_2015").show()
actuals
.groupBy($"segment")
.agg(round(avg($"revenue_2015"),2).alias("avg_revenue_2015"))
.orderBy($"segment")
.show()
###Output
No of rows: 18417
+-------+------------------+
|summary| revenue_2015|
+-------+------------------+
| count| 18417|
| mean|25.975681707118422|
| stddev| 127.9801632917415|
| min| 0.0|
| max| 4500.0|
+-------+------------------+
+-----------------+----------------+
| segment|avg_revenue_2015|
+-----------------+----------------+
|active high value| 323.57|
| active low value| 52.31|
| active new| 79.17|
| cold| 0.0|
| inactive| 0.0|
| warm high value| 0.0|
| warm low value| 0.0|
| warm new| 0.0|
+-----------------+----------------+
###Markdown
Show avg. revenue per customer and per segment for 2014 (FORWARD looking). How much revenue can you expect from your active customers today (today in this data set is 2015), next year? We don't know the future, and we don't know exactly what's going to happen, but the one thing we can do is go back into the past (2014) and look at how much revenue we got from the customers in each 2014 segment, going into 2015. That's the next step of this analysis. So what we'll do is merge the revenue generated in 2015, as before, but with the customer list of 2014, and look at how much revenue was generated by each customer based on the segment they were in a year ago. And that's why we call it forward: the segment in 2014 will enlighten us about how much revenue was generated in 2015 by these customers.
###Code
//Merge 2014 customers with 2015 revenue
val forward = segmented2014
.join(revenue2015, Seq("customer_id"), "left")
.na
.fill(0.0, Seq("revenue_2015"))
forward.describe("revenue_2015").show()
forward
.groupBy($"segment")
.agg(round(avg($"revenue_2015"),2).alias("avg_revenue_2015"))
.orderBy($"segment")
.show()
###Output
+-------+------------------+
|summary| revenue_2015|
+-------+------------------+
| count| 16905|
| mean|21.218273883466434|
| stddev|111.24529944791601|
| min| 0.0|
| max| 4500.0|
+-------+------------------+
+-----------------+----------------+
| segment|avg_revenue_2015|
+-----------------+----------------+
|active high value| 254.08|
| active low value| 41.9|
| active new| 31.05|
| cold| 6.11|
| inactive| 2.95|
| warm high value| 114.46|
| warm low value| 13.49|
| warm new| 5.06|
+-----------------+----------------+
|
.ipynb_checkpoints/wikidata-org-checkpoint.ipynb | ###Markdown
Wikidata.org: Wikidata.org allows you to get information from Wiki sites through an API. Using the API requires you to write the query in [SPARQL](https://en.wikipedia.org/wiki/SPARQL), but there are plenty of example queries to work from at https://query.wikidata.org, allowing you to get started without prior knowledge of SPARQL. Wikidata and Machine Learning: As Wiki is an international project in 300+ languages with a lot of articles, Wikidata is a good starting point for harvesting data to use in machine learning.* Example SPARQL queries * All [Swedish politicians in the Swedish Government](http://tinyurl.com/y4jo4lva) * just those with a [twitter account](http://tinyurl.com/y4qb32q9)* In May the [Lexicographical data project](https://www.wikidata.org/wiki/Wikidata:Lexicographical_data/en) also started, with the goal to "describe precisely all words in all languages" in a machine-readable form. More information:* [Wikidata](https://www.wikidata.org/wiki/Wikidata:Main_Page)* [The query editor](https://query.wikidata.org/)
###Code
from IPython.display import YouTubeVideo
YouTubeVideo("AR66WVBViBQ")
###Output
_____no_output_____
###Markdown
Example: Swedish Prime ministers
###Code
# Install a dependency using a terminal command
!pip install --quiet sparqlwrapper
# Allows us to use IPython.display.JSON
import IPython.display
from SPARQLWrapper import SPARQLWrapper, JSON
endpoint_url = "https://query.wikidata.org/sparql"
query = """# Query Find in WikiData people with position held P39 Swedish Prime minister Q687075
#
select ?person ?personLabel ?personDescription ?replacedbyLabel ?start ?pic ?end {
{
?person wdt:P39 wd:Q687075;
p:P39 [
ps:P39 wd:Q687075;
pq:P580 ?start;
pq:P582 ?end;
pq:P1365 ?replace; #Replace
pq:P1366 ?replacedby #Replaced by
].
OPTIONAL{ ?person wdt:P18 ?pic .} # If we have an illustration
}
SERVICE wikibase:label { bd:serviceParam wikibase:language "en"}
}
Order by ?start"""
def get_results(endpoint_url, query):
sparql = SPARQLWrapper(endpoint_url)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
return sparql.query().convert()
results = get_results(endpoint_url, query)
IPython.display.JSON(results)
for result in results["results"]["bindings"]:
print(result["personLabel"]["value"], result["start"]["value"], " - ", result["end"]["value"])
###Output
Arvid Posse 1880-04-19T00:00:00Z - 1883-06-13T00:00:00Z
Carl Johan Thyselius 1883-06-13T00:00:00Z - 1884-05-16T00:00:00Z
Robert Themptander 1884-05-16T00:00:00Z - 1888-02-06T00:00:00Z
Gillis Bildt 1888-02-06T00:00:00Z - 1889-10-12T00:00:00Z
Gustaf Åkerhielm 1889-10-12T00:00:00Z - 1891-07-10T00:00:00Z
Erik Gustaf Boström 1891-07-10T00:00:00Z - 1900-09-12T00:00:00Z
Fredrik von Otter 1900-09-12T00:00:00Z - 1902-07-05T00:00:00Z
Erik Gustaf Boström 1902-07-05T00:00:00Z - 1905-04-13T00:00:00Z
Johan Ramstedt 1905-04-13T00:00:00Z - 1905-08-02T00:00:00Z
Christian Lundeberg 1905-08-02T00:00:00Z - 1905-11-07T00:00:00Z
Karl Staaff 1905-11-07T00:00:00Z - 1906-05-29T00:00:00Z
Arvid Lindman 1906-05-29T00:00:00Z - 1911-10-07T00:00:00Z
Karl Staaff 1911-10-07T00:00:00Z - 1914-02-17T00:00:00Z
Hjalmar Hammarskjöld 1914-02-17T00:00:00Z - 1917-03-30T00:00:00Z
Carl Swartz 1917-03-30T00:00:00Z - 1917-10-19T00:00:00Z
Nils Edén 1917-10-19T00:00:00Z - 1920-03-10T00:00:00Z
Hjalmar Branting 1920-03-10T00:00:00Z - 1920-10-27T00:00:00Z
Gerhard Louis De Geer 1920-10-27T00:00:00Z - 1921-02-23T00:00:00Z
Oscar von Sydow 1921-02-23T00:00:00Z - 1921-10-13T00:00:00Z
Hjalmar Branting 1921-10-13T00:00:00Z - 1923-04-19T00:00:00Z
Ernst Trygger 1923-04-19T00:00:00Z - 1924-10-18T00:00:00Z
Hjalmar Branting 1924-10-18T00:00:00Z - 1925-01-24T00:00:00Z
Rickard Sandler 1925-01-24T00:00:00Z - 1926-06-07T00:00:00Z
Carl Gustaf Ekman 1926-06-07T00:00:00Z - 1928-10-02T00:00:00Z
Arvid Lindman 1928-10-02T00:00:00Z - 1930-06-07T00:00:00Z
Carl Gustaf Ekman 1930-06-07T00:00:00Z - 1932-08-06T00:00:00Z
Felix Hamrin 1932-08-06T00:00:00Z - 1932-09-24T00:00:00Z
Per Albin Hansson 1932-09-24T00:00:00Z - 1936-06-19T00:00:00Z
Axel Pehrsson-Bramstorp 1936-06-19T00:00:00Z - 1936-09-28T00:00:00Z
Per Albin Hansson 1936-09-28T00:00:00Z - 1946-10-06T00:00:00Z
Tage Erlander 1946-10-11T00:00:00Z - 1969-10-14T00:00:00Z
Olof Palme 1969-10-14T00:00:00Z - 1976-10-08T00:00:00Z
Thorbjörn Fälldin 1976-10-08T00:00:00Z - 1978-10-18T00:00:00Z
Ola Ullsten 1978-10-18T00:00:00Z - 1979-10-12T00:00:00Z
Thorbjörn Fälldin 1979-10-12T00:00:00Z - 1982-10-08T00:00:00Z
Olof Palme 1982-10-08T00:00:00Z - 1986-02-28T00:00:00Z
Ingvar Carlsson 1986-03-01T00:00:00Z - 1991-10-04T00:00:00Z
Carl Bildt 1991-10-04T00:00:00Z - 1994-10-07T00:00:00Z
Ingvar Carlsson 1994-10-07T00:00:00Z - 1996-03-22T00:00:00Z
Göran Persson 1996-03-22T00:00:00Z - 2006-10-06T00:00:00Z
Fredrik Reinfeldt 2006-10-06T00:00:00Z - 2014-10-03T00:00:00Z
###Markdown
Example: Airports near Berlin
###Code
from SPARQLWrapper import SPARQLWrapper, JSON
endpoint_url = "https://query.wikidata.org/sparql"
query = """SELECT ?place ?placeLabel ?location
WHERE
{
# Berlin coordinates
wd:Q64 wdt:P625 ?berlinLoc .
SERVICE wikibase:around {
?place wdt:P625 ?location .
bd:serviceParam wikibase:center ?berlinLoc .
bd:serviceParam wikibase:radius "100" .
} .
# Is an airport
FILTER EXISTS { ?place wdt:P31/wdt:P279* wd:Q1248784 } .
SERVICE wikibase:label {
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en" .
}
}"""
def get_results(endpoint_url, query):
sparql = SPARQLWrapper(endpoint_url)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
return sparql.query().convert()
results = get_results(endpoint_url, query)
IPython.display.JSON(results)
for result in results["results"]["bindings"]:
print(result["placeLabel"]["value"])
###Output
Berlin Tempelhof Airport
Berlin Schönefeld Airport
Berlin Tegel Airport
Dedelow Airport
Nauen Airport
Segeletz Airport
Lusse Airport
Eisenhuttenstadt Airport
Eisenhuttenstadt Airport
Kyritz Airport
Berlin Brandenburg Airport
Johannisthal Air Field
Kunice
Chojna
|
notebooks/encoder_analyze.ipynb | ###Markdown
Encoded output
###Code
inputs = ["erlauben sie bitte, dass ich mich kurz vorstelle. ich heiße jana novakova.||fr",
"erlauben sie bitte, dass ich mich kurz vorstelle. ich heiße jana novakova.||de",
"les socialistes et les républicains sont venus apporter leurs voix à la majorité pour ce texte.||fr",
"les socialistes et les républicains sont venus apporter leurs voix à la majorité pour ce texte.||de"]
embeddings = encode(model, inputs[:-2])
tsne = [(t, c, TSNE(n_components=2).fit_transform(e)) for (t, c, e) in embeddings]
fig = plt.figure(figsize=(19, 25))
for i, (t, c, e) in enumerate(tsne):
ax = plt.subplot(4, 3, i + 1)
for j in range(len(t)):
plt.scatter(e[j, 0], e[j, 1], c='k', marker=r"$ {} $".format(t[j].replace(" ", "/")), alpha=0.7, s=50)
#ax.set_title(f'pool_layer={i + 1}')
plt.tight_layout()
plt.subplots_adjust(bottom=0.1, right=0.95, top=0.9)
#cax = plt.axes([0.96, 0.6, 0.02, 0.3])
#cbar = plt.colorbar(cax=cax, ticks=range(len(texts)))
#cbar.ax.get_yaxis().set_ticks([])
#for j, lab in enumerate(texts.keys()):
# cbar.ax.text(4.25, (2 * j + 1) / 2.25, lab, ha='center', va='center', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
Language embedding
###Code
def show_similarity(similarity, languages):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
diagonal = similarity[1:,:-1].copy()
lower_indices = np.tril_indices(diagonal.shape[0])
lower = diagonal[lower_indices]
lower_min = np.min(lower)
lower_max = np.max(lower)
diagonal = (diagonal - lower_min) / (lower_max - lower_min)
cax = ax.matshow(np.tril(diagonal), interpolation='nearest')
fig.colorbar(cax)
ax.set_xticklabels([languages[0]]+languages[:-1], rotation='vertical')
ax.set_yticklabels(languages)
plt.show()
embeddings = model._encoder._embedding.weight.detach().numpy()
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
similarity = cosine_similarity(embeddings)
show_similarity(similarity, hp.languages)
layer_number = 2
layer_weights = model._encoder._layers[layer_number]._convolution._bottleneck.weight.detach().numpy()
bottleneck_embeddings = embeddings @ layer_weights.T
bottleneck_similarity = cosine_similarity(bottleneck_embeddings)
show_similarity(bottleneck_similarity, hp.languages)
###Output
_____no_output_____ |
Exam/exam_part2_mcmc.ipynb | ###Markdown
Exercise**Determination of the metallicity and projected rotation velocity of the nearby solar type star HD4308.** Required libraries: numpy, matplotlib, astropy, corner + an MCMC sampler of your choice (emcee, sam, ...) Introduction: High resolution spectra of solar type stars allow one to compute the Cross Correlation Function (CCF) of the spectrum of the star with respect to a template built from a very high signal-to-noise, high resolution spectrum of the Sun. The resulting CCF can be seen as an "average stellar absorption line". It contains several interesting pieces of information about the star. The center of this CCF gives information about the radial velocity of the star with respect to the observer (Doppler effect). The depth (or contrast) and the width of the CCF give information about the projected stellar rotation velocity ($v\sin i$), as well as the metallicity content of the star (FeH). The relation between the contrast and width measured on the CCF and the stellar properties ($v\sin i$ and FeH) is not simple. However, empirical relations based on the $B-V$ color index of the star have been obtained. In this problem, you will derive the $v\sin i$ and metallicity of HD4308, which is a G6 dwarf with color index $B-V=0.641$, in two steps:- You will adjust a model on the CCF of this star in order to determine its basic properties (center, width, contrast). You will use an MCMC algorithm in order to sample the probability distribution of these parameters.- You will use the provided empirical relations to derive the $v\sin i$ and metallicity (and their distributions). Loading data (nothing to do here): We load the data from HD4308_CCF.csv and plot it.
###Code
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import corner
import emcee
data = ascii.read('HD4308_CCF.csv', data_start=2, delimiter=';')
BmV=0.641
Ncor=len(data['rv'])
fig, ax1 = plt.subplots(1, 1, figsize=(15, 3), constrained_layout=True)
ax1.errorbar(data['rv'], data['ccf'], data['ccfErr'])
ax1.set_xlabel('Radial Velocity [km/s]')
ax1.set_ylabel('Cross Correlation Function')
ax1.set_title('HD4308')
###Output
_____no_output_____
###Markdown
Question 1)As you can see in the plot above, the CCF has been computed as a function of the velocity $v$.For each velocity $v_i$ in the vector data['rv'], we provide the CCF value (CCF$(v_i)$)as well as its uncertainty, in the vectors data['ccf'] and data['ccfErr'].\The noise is assumed to be gaussian and independent.In order to determine the center ($v_0$), width ($w$) and contrast ($c$) of the CCF,you will model it with a Gaussian curve:$$ f(v) = 1 - c \exp\left(-\frac{1}{2}\frac{(v-v_0)^2}{w^2}\right). $$This model has already been implemented in the "ccf_model" function below.**a) Write the "loglikelihood" function below.**We assumed uniform priors for each parameter ($v_0$, $w$ and $c$), and implemented the corresponding "logprior" function.**b) Write the "logprobability" function below.**
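For reference, one way to write the two quantities requested here, given the independent Gaussian noise stated above (with $\sigma_i$ the values in data['ccfErr'] and $f$ the model), is
$$ \log \mathcal{L}(c, v_0, w) = -\frac{1}{2} \sum_i \frac{\left(\mathrm{CCF}(v_i) - f(v_i)\right)^2}{\sigma_i^2} + \mathrm{const}, \qquad \log p(c, v_0, w \mid \mathrm{data}) = \log \pi(c, v_0, w) + \log \mathcal{L}(c, v_0, w), $$
where $\pi$ is the uniform prior implemented in logprior; the additive constant can be dropped, since MCMC sampling only needs the log-probability up to a constant.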
###Code
vmin = np.min(data['rv'])
vmax = np.max(data['rv'])
Dv = vmax-vmin
dv = Dv/data['rv'].size
def ccf_model(param):
contrast, v0, width = param
return(1 - contrast * np.exp(-0.5*((data['rv'] - v0)/width)**2))
def logprior(param):
contrast, v0, width = param
if contrast < 0 or contrast > 1:
return(-np.inf)
lp = 0
if v0 < vmin or v0 > vmax:
return(-np.inf)
lp -= np.log(Dv)
if width < 4*dv or width > Dv/2:
return(-np.inf)
lp -= np.log(Dv/2 - 4*dv)
return(lp)
def loglikelihood(param):
################### TODO ###################
y = data['ccf']
ye = data['ccfErr']
ff = ccf_model(param)
lp = ((y-ff)/ye)**2
l1 = -0.5*np.sum(lp)
return l1
def logprobability(param):
################### TODO ###################
bb = logprior(param) + loglikelihood(param)
return bb
###Output
_____no_output_____
###Markdown
Question 2) Now that you have defined the "logprobability" function, you can run an MCMC algorithm to explore the parameter space.We let you choose which algorithm to use (emcee, sam, etc.).Do not forget to **import** it first.We provide below a **very crude** guess of the parameters in order to initialize the MCMC.
###Code
# Initialize model parameters
paramInit = np.empty(3)
paramInit[0] = 1.-np.min(data['ccf']) # contrast (depth)
paramInit[1] = data['rv'][np.where(data['ccf']==np.min(data['ccf']))[0][0]] # center
paramInit[2] = np.ptp(data['rv'][data['ccf']<(1-paramInit[0]/2)])*0.5 # width
# Plot the corresponding model and residuals
fig, axs = plt.subplots(2, 1, sharey=False, sharex=True, figsize=(15, 3), constrained_layout=True)
axs[0].plot(data['rv'], data['ccf'])
axs[0].plot(data['rv'], ccf_model(paramInit), 'r', linewidth=0.5, linestyle=':')
axs[0].axhline(1, linestyle='--', linewidth=0.5, color='black')
axs[0].set_ylabel('Contrast')
axs[1].plot(data['rv'], data['ccf']-ccf_model(paramInit), 'k')
axs[1].axhline(0, linestyle='--', linewidth=0.5, color='black')
axs[1].set_ylim((-0.05, 0.05))
axs[1].set_ylabel('residuals')
###Output
_____no_output_____
###Markdown
**a) Use the MCMC algorithm of your choice to sample the posterior distribution of the parameters**
###Code
################### TODO ###################
nwalkers, ndim = 32, 3
posInit = paramInit * np.random.randn(nwalkers,ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, logprobability)
sampler.run_mcmc(posInit, 10000, progress=True)
###Output
0%| | 0/10000 [00:00<?, ?it/s]/home/jayshil/anaconda3/lib/python3.7/site-packages/emcee/moves/red_blue.py:99: RuntimeWarning: invalid value encountered in double_scalars
lnpdiff = f + nlp - state.log_prob[j]
100%|██████████| 10000/10000 [01:07<00:00, 148.55it/s]
###Markdown
**b) Plot the MCMC chains and comment. Define the samples that can be used to conduct a statistical analysis.**
###Code
################### TODO ###################
fig, axes = plt.subplots(3, figsize=(10, 7), sharex=True)
samples = sampler.get_chain()
labels = ['contrast', 'v0', 'width']
for i in range(ndim):
ax = axes[i]
ax.plot(samples[:, :, i], "k", alpha=0.3)
ax.set_xlim(0, len(samples))
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number");
###Output
_____no_output_____
###Markdown
Samples after the first ~2000 steps can be used for statistical analysis, once the chains have become stable. **c) Display a corner plot of the samples and comment. Which parameters are correlated?**
###Code
################### TODO ###################
flat_samples = sampler.get_chain(discard=1000, thin=15, flat=True)
fig = corner.corner(
flat_samples, labels=labels, truths=[paramInit[0], paramInit[1], paramInit[2]]
);
###Output
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
###Markdown
Although there is some scatter in the values, a large part of them is centered on the initial estimates. Since the distribution is so tightly centered on those values, we are not able to see any correlation. Question 3) The final step is to apply the empirical relations to derive the metallicity and rotational velocity of the star.We provide the two functions below that implement these empirical relations.
###Code
def computeFeH(width, contrast, BmV):
#Empirical relation that relates the CCF parameters to the stellar metallicity
a, b, c, d = -0.013, 4.798, -5.197, 2.161 # coeff FeH.
#CCF peak equivalent width
W_kmps = width * contrast * np.sqrt(2 * np.pi)
FeH = a + b*np.log10(W_kmps) + c*BmV + d*BmV**2
return(FeH)
def computeVsini(width, BmV):
Ar, e, f, g, h = 1.579, -3.328, 10.265, -9.467, 5.991 # coeff for vsini.
ccfFwhm_kmps = 2*np.sqrt(2*np.log(2))*width
sig0_kmps = e + f*BmV + g*BmV**2 + h*BmV**3
vsini = Ar*np.sqrt(width**2 - sig0_kmps**2)
return(vsini)
###Output
_____no_output_____
###Markdown
**a) Use these functions to derive the probability distributions of the metallicity and rotational velocity**
###Code
################### TODO ###################
cont = flat_samples[:,0]
v0 = flat_samples[:,1]
wid = flat_samples[:,2]
meta = computeFeH(wid, cont, v0)
velo = computeVsini(wid, v0)
###Output
/home/jayshil/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: invalid value encountered in log10
/home/jayshil/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:15: RuntimeWarning: invalid value encountered in sqrt
from ipykernel import kernelapp as app
###Markdown
**b) Compute the mean, median and standard deviation of these stellar properties**
###Code
################### TODO ###################
print(np.mean(meta), np.median(meta), np.std(meta))
print(np.mean(velo), np.median(velo), np.std(velo))
###Output
nan nan nan
nan nan nan
###Markdown
**c) Display a corner plot of these two parameters, comment. Are these parameters correlated? Compute the covariance and the correlation between FeH and vsini.**
###Code
################### TODO ###################
data1 = np.vstack((meta, velo))
data = np.transpose(data1)
figure = corner.corner(data)
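# A possible way (not part of the original submission) to compute the requested covariance and
# correlation between FeH and vsini with numpy; with NaNs present in the samples above, the
# results stay NaN unless nan-aware estimators or cleaned samples are used.
cov_feh_vsini = np.cov(meta, velo)        # 2x2 covariance matrix of the FeH and vsini samples
corr_feh_vsini = np.corrcoef(meta, velo)  # 2x2 correlation matrix; the off-diagonal entry is the correlation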
###Output
_____no_output_____ |
t1_questions/item_05.ipynb | ###Markdown
Item V: Implement in Jupyter Notebook the Lagrange interpolation method with *sympy*. Then, find the interpolation polynomial for the following points:* $(0,1),(1,2),(2,4)$. Is it a second degree polynomial? If not, why is this?* $(0,1),(1,2),(2,3)$. Is it a second degree polynomial? If not, why is this?---
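As a reminder of the formula that the function below implements, the Lagrange interpolation polynomial through the points $(x_j, y_j)$, $j = 0, \dots, n-1$, is
$$ L(x) = \sum_{j=0}^{n-1} y_j \prod_{\substack{m=0 \\ m \neq j}}^{n-1} \frac{x - x_m}{x_j - x_m}. $$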
###Code
def lagrange(xs,ys):
assert(len(xs)==len(ys))
n = len(xs)
x = sympy.Symbol('x')
poly = 0
for j in range(0,n):
lag = ys[j]
for m in range(0,n):
if j!=m:
lag *= (x-xs[m])/(xs[j]-xs[m])
poly += lag
return sympy.simplify(poly)
lagrange([0,1,2],[1,2,4])
lagrange([0,1,2],[1,2,3])
###Output
_____no_output_____ |
notebooks/SemEval-2016.ipynb | ###Markdown
SemEval-2016 Dataset Statistics
###Code
from pathlib import Path
from bs4 import BeautifulSoup
train_file = Path.cwd().joinpath("reference/subtask1_rr_en/train.xml")
soup = None
with train_file.open(encoding="utf-8") as f:
soup = BeautifulSoup(f.read().strip(), "lxml-xml")
if soup is None:
raise Exception("Can't read xml file")
sentence_nodes = soup.find_all("sentence")
sentences = []
entities = []
for n in sentence_nodes:
s = n.find("text").string
e_count = 0
if n.find("Opinions"):
for c in n.find("Opinions").contents:
if c.name == "Opinion":
e = {
"sentence_id": n["id"],
"target": c["target"],
"category": c["category"],
"polarity": c["polarity"]
}
entities.append(e)
e_count += 1
sentences.append({
"sentence_id": n["id"],
"sentence": s,
"entities": e_count
})
print("{} sentences.".format(len(sentences)))
print("{} entities.".format(len(entities)))
import pandas as pd
sentences = pd.DataFrame(sentences)
entities = pd.DataFrame(entities)
sentences.head(5)
entities.head(5)
###Output
_____no_output_____
###Markdown
Label Distribution
###Code
%matplotlib inline
entities.groupby(["category"]).count()["target"].sort_values(ascending=False).plot(kind="bar")
(entities.groupby(["category"]).count()["target"].sort_values(ascending=False).cumsum() * 100 / len(entities)).plot.line(secondary_y=True, style="g", rot=90)
entities.groupby(["polarity"]).count()["target"].plot.bar()
entities.groupby(["polarity", "category"]).count()["target"].divide(entities.groupby(["category"]).count()["target"]).unstack("polarity").plot.bar(stacked=True)
###Output
_____no_output_____
###Markdown
Sentence Distribution
###Code
sentences[sentences["entities"] > 0].groupby(["entities"]).count()["sentence_id"].plot.bar()
###Output
_____no_output_____ |
notebooks/espresso.ipynb | ###Markdown
These cells are used to pre-process the data.They only need to be run once, and after that the saved data file can be loaded up from disk.
###Code
data = wobble.Data()
filenames = glob.glob('/Users/mbedell/python/wobble/data/toi/TOI-*_CCF_A.fits')
for filename in tqdm(filenames):
try:
sp = wobble.Spectrum()
sp.from_ESPRESSO(filename, process=True)
data.append(sp)
except Exception as e:
print("File {0} failed; error: {1}".format(filename, e))
data.write('../data/toi.hdf5')
###Output
_____no_output_____
###Markdown
Load the data
###Code
data = wobble.Data(filename='../data/toi.hdf5')
R = np.copy(data.R) # we'll need this later
data
data.drop_bad_orders(min_snr=3)
data.drop_bad_epochs(min_snr=3)
data.orders
r = 0
good = data.ivars[r] > 0.
for e in [0,10,20]:
plt.errorbar(data.xs[r][e][good[e]], data.ys[r][e][good[e]],
1./np.sqrt(data.ivars[r][e][good[e]]), ls='', fmt='o', ms=2, alpha=0.5)
plt.title('Echelle order #{0}'.format(data.orders[r]), fontsize=14);
###Output
_____no_output_____
###Markdown
Generate regularization parameter files & tune them Since we don't have any existing regularization parameter files for ESPRESSO, we have to make some new ones. This is needed because the default *wobble* regularization is tuned to HARPS, which has a different number of spectral orders and different wavelength coverage - if we try to run with those files, the optimization will *(a)* be non-optimal and *(b)* eventually crash when we try to access an order than does not exist for HARPS.
###Code
star_filename = '../wobble/regularization/toi_star.hdf5'
tellurics_filename = '../wobble/regularization/toi_tellurics.hdf5'
wobble.generate_regularization_file(star_filename, R, type='star')
wobble.generate_regularization_file(tellurics_filename, R, type='telluric')
plot_dir = '../regularization/toi/'
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
###Output
_____no_output_____
###Markdown
We'll tune the regularization using a train-and-validate approach, so let's set aside some epochs to be the validation set:
###Code
validation_epochs = np.random.choice(data.N, data.N//6, replace=False) # 3 epochs for validation set
r = 100
for e in [validation_epochs[0]]:
plt.errorbar(data.xs[r][e][good[e]], data.ys[r][e][good[e]],
1./np.sqrt(data.ivars[r][e][good[e]]), ls='', fmt='o', ms=2, alpha=0.5)
###Output
_____no_output_____
###Markdown
Here's an example of how this regularization tuning will go for one order:
###Code
r = 100
o = data.orders[r]
objs = wobble.setup_for_order(r, data, validation_epochs)
wobble.improve_order_regularization(o, star_filename, tellurics_filename,
*objs,
verbose=False, plot=False,
basename='{0}o{1}'.format(plot_dir, o),
K_t=0, L1=True, L2=True)
###Output
_____no_output_____ |
Lichaamsoppervlak.ipynb | ###Markdown
Press shift-enter in the cell below. To run everything again after changing a cell, click on the cell below and press shift-enter again.
###Code
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
}
);
</script>
from math import sqrt
from math import log as ln
import numpy as np
import pylab as pl
from matplotlib.widgets import Slider
%matplotlib inline
###Output
/Users/grange/anaconda/envs/py2/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
###Markdown
Comparison of the different formulas for calculating the body surface area. First we ask you to fill in the values, after which everything is calculated.
###Code
lengte = float(input("Vul lengte in centimeter in: ").replace(",","."))
gewicht = float(input("Vul gewicht in kg in: ").replace(",","."))
if lengte < 5:
print ("\nWaarschuwing: lengte van %s cm lijkt me te kort voor een mens. "\
"Ik neem aan dat de lengte in meters is ingevuld en ga verder rekenen met %s cm. "\
%(lengte, lengte*100.))
lengte *= 100.
###Output
Vul lengte in centimeter in: 190
###Markdown
The following cells define the formulas that we will use for the calculations further on.
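Written out from the code below, most of these relations share the same power-law form (with $L$ the height in cm and $M$ the weight in kg), implemented by the helper machtsformule, while Mosteller is the simple square-root special case:
$$ \mathrm{BSA} = A \cdot L^{B} \cdot M^{C}, \qquad \mathrm{BSA}_{\mathrm{Mosteller}} = \sqrt{\frac{L \cdot M}{3600}}. $$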
###Code
def Mosteller(L, M):
LM = L * M
LM = LM / 3600.
return sqrt(LM)
def machtsformule(A, B, C, L, M):
LM = pow(L,B) * pow(M,C)
return A * LM
def GehanGeorge(L, M):
return machtsformule(A=0.0235, B=0.42246, C=0.51456,
L=L, M=M)
def Haycock(L, M):
return machtsformule(A=0.024265, B=0.3964, C=0.5378,
L=L, M=M)
def Dubois(L, M):
return machtsformule(A=0.007184, B=0.725, C=0.425,
L=L, M=M)
def Boyd(L, M):
C = 0.6157 - (0.00816474 * ln(M))
return machtsformule(A=0.0332965, B=0.3, C=C,
L=L, M=M)
def Schlich_vrouw(L, M):
return machtsformule(A=0.0009754382, B=1.08, C=0.46,
L=L, M=M)
def Schlich_man(L, M):
return machtsformule(A=0.000579479, B=1.24, C=0.38,
L=L, M=M)
def Fujimoto(L, M):
return machtsformule(A=0.008883, B=0.663, C=0.444,
L=L, M=M)
def Takahira(L, M):
return machtsformule(A=0.007241, B=0.725, C=0.425,
L=L, M=M)
###Output
_____no_output_____
###Markdown
Below, all results are listed one below the other.
###Code
print ("{0:<16} {1}".format("Mosteller", Mosteller(lengte, gewicht)))
print ("{0:<16} {1}".format("Gehan & George", GehanGeorge(lengte, gewicht)))
print ("{0:<16} {1}".format("Haycock", Haycock(lengte, gewicht)))
print ("{0:<16} {1}".format("Dubois", Dubois (lengte, gewicht)))
print ("{0:<16} {1}".format("Boyd", Boyd(lengte, gewicht)))
print ("{0:<16} {1}".format("Schlich (man):", Schlich_man(lengte, gewicht)))
print ("{0:<16} {1}".format("Schlich (vrouw)", Schlich_vrouw(lengte, gewicht)))
print ("{0:<16} {1}".format("Fujimoto",Fujimoto(lengte, gewicht)))
print ("{0:<16} {1}".format("Takahira",Takahira(lengte, gewicht)))
###Output
_____no_output_____
###Markdown
And now let's look at how the different models behave as a function of height and weight, based on the previously entered values for height and weight.
###Code
lengtes = np.linspace(160, 210, 50)
gewichten = np.linspace(70, 140, 70)
mostvals = [Mosteller(L, gewicht) for L in lengtes]
gehgeovals = [GehanGeorge(L, gewicht) for L in lengtes]
hayvals = [Haycock(L, gewicht) for L in lengtes]
dubvals = [Dubois(L, gewicht) for L in lengtes]
boydvals = [Boyd(L, gewicht) for L in lengtes]
sch_mvals = [Schlich_man(L, gewicht) for L in lengtes]
sch_vvals = [Schlich_vrouw(L, gewicht) for L in lengtes]
fujivals = [Fujimoto(L, gewicht) for L in lengtes]
takavals = [Takahira(L, gewicht) for L in lengtes]
pl.plot(lengtes, mostvals)
pl.plot(lengtes, gehgeovals)
pl.plot(lengtes, hayvals)
pl.plot(lengtes, dubvals)
pl.plot(lengtes, boydvals)
pl.plot(lengtes, sch_mvals)
pl.plot(lengtes, sch_vvals)
pl.plot(lengtes, fujivals)
pl.plot(lengtes, takavals)
pl.xlabel("Lengte (cm)")
pl.ylabel("Huidoppervlak (cm^2)")
pl.title("bij een gewicht van %s kilo"%gewicht);
mostvals = [Mosteller(lengte, G) for G in gewichten]
gehgeovals = [GehanGeorge(lengte, G) for G in gewichten]
hayvals = [Haycock(lengte, G) for G in gewichten]
dubvals = [Dubois(lengte, G) for G in gewichten]
boydvals = [Boyd(lengte, G) for G in gewichten]
sch_mvals = [Schlich_man(lengte, G) for G in gewichten]
sch_vvals = [Schlich_vrouw(lengte, G) for G in gewichten]
fujivals = [Fujimoto(lengte, G) for G in gewichten]
takavals = [Takahira(lengte, G) for G in gewichten]
pl.plot(gewichten, mostvals)
pl.plot(gewichten, gehgeovals)
pl.plot(gewichten, hayvals)
pl.plot(gewichten, dubvals)
pl.plot(gewichten, boydvals)
pl.plot(gewichten, sch_mvals)
pl.plot(gewichten, sch_vvals)
pl.plot(gewichten, fujivals)
pl.plot(gewichten, takavals)
pl.xlabel("Gewicht (kg)")
pl.ylabel("Huidoppervlak (cm^2)")
pl.title("bij een lengte van %s cm"%lengte);
###Output
_____no_output_____ |
entrega_apis.ipynb | ###Markdown
API EXERCISES
###Code
# Import the libraries needed to work with APIs
# We are going to query an API for the weather forecast
#https://www.el-tiempo.net/api/json/v2/home
# Configure the endpoint
# Check the response status and explain the type of status code
# Make the request fail on purpose
# Check which type of server is behind the API and the content type
# Look up the surface area of Manresa (province of Barcelona)
# Compare it with Alcalá de Henares (province of Madrid)
# Create a DataFrame to display the data
# Create a DataFrame with the temperatures for the municipality of Algadefe in the province of León
# Now let's query the NASA API
#url = 'https://api.nasa.gov/planetary/apod?'
# Does this API require authentication? Show the error message
# Get the total hours of sunlight required for Mars
#url='https://api.nasa.gov/insight_weather/?
# Use the NASA API to see the asteroid close approaches between 2021/01/01 and 2021/01/07 and get the absolute_magnitude_h for that date range at hour zero
#url:https://api.nasa.gov/neo/rest/v1/feed?start_date=START_DATE&end_date=END_DATE&api_key=API_KEY
# Create a DataFrame with the previous data
# Recap:
# Query string parameters to configure GET requests
# Request headers to configure GET requests
# Authentication via parameters
# Authentication via headers
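# A minimal, hedged sketch (not part of the original exercise) illustrating the four patterns
# recapped above with the requests library; httpbin.org is used here only as a neutral echo service
# and the token is a placeholder.
import requests

TOKEN = "YOUR_API_KEY"  # placeholder credential

# 1) Query string parameters for a GET request
r1 = requests.get("https://httpbin.org/get", params={"q": "Manresa", "format": "json"})
# 2) Request headers for a GET request
r2 = requests.get("https://httpbin.org/get", headers={"Accept": "application/json"})
# 3) Authentication passed as a parameter
r3 = requests.get("https://httpbin.org/get", params={"api_key": TOKEN})
# 4) Authentication passed as a header
r4 = requests.get("https://httpbin.org/get", headers={"Authorization": "Bearer " + TOKEN})
# r1.status_code and r1.json()["args"] would show the status and the echoed parameters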
###Output
_____no_output_____ |
Package_Demos/.ipynb_checkpoints/Pandas_Lessons-checkpoint.ipynb | ###Markdown
Data From Excel
###Code
import numpy as np
# set seed
np.random.seed(111)
# Function to generate test data
def CreateDataSet(Number=1):
Output = []
for i in range(Number):
# Create a weekly (mondays) date range
rng = pd.date_range(start='1/1/2009', end='12/31/2012', freq='W-MON')
# Create random data
data = np.random.randint(low=25,high=1000,size=len(rng))
# Status pool
status = [1,2,3]
# Make a random list of statuses
random_status = [status[np.random.randint(low=0,high=len(status))] for i in range(len(rng))]
# State pool
states = ['GA','FL','fl','NY','NJ','TX']
# Make a random list of states
random_states = [states[np.random.randint(low=0,high=len(states))] for i in range(len(rng))]
Output.extend(zip(random_states, random_status, data, rng))
return Output
dataset = CreateDataSet(4)
df = pd.DataFrame(data=dataset, columns=['State','Status','CustomerCount','StatusDate'])
df.info()
df.head()
# Save results to excel
df.to_excel('Demo_Data.xlsx', index=False)
print('Done')
Location = 'Demo_Data.xlsx'
# Parse a specific sheet
df = pd.read_excel(Location, 0, index_col='StatusDate')
df.head()
# Clean State Column, convert to upper case
df['State'] = df.State.apply(lambda x: x.upper())
df['State'].unique()
## The semi Colon is important
df['CustomerCount'].plot(figsize=(15,5));
# Group by State and StatusDate
Daily = df.reset_index().groupby(['State','StatusDate']).sum()
Daily.loc['FL'].plot()
Daily.loc['GA'].plot()
Daily.loc['NY'].plot()
Daily.loc['NJ'].plot()
Daily.loc['TX'].plot();
###Output
_____no_output_____
###Markdown
Ways to Calculate Outliers
###Code
# Create a dataframe with dates as your index
States = ['NY', 'NY', 'NY', 'NY', 'FL', 'FL', 'GA', 'GA', 'FL', 'FL']
data = [1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10]
idx = pd.date_range('1/1/2012', periods=10, freq='MS')
df1 = pd.DataFrame(data, index=idx, columns=['Revenue'])
df1['State'] = States
# Create a second dataframe
data2 = [10.0, 10.0, 9, 9, 8, 8, 7, 7, 6, 6]
idx2 = pd.date_range('1/1/2013', periods=10, freq='MS')
df2 = pd.DataFrame(data2, index=idx2, columns=['Revenue'])
df2['State'] = States
# Combine dataframes
df = pd.concat([df1,df2])
df
#### Average and Standard Deviation (only appropriate for Gaussian distributions)
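# A brief illustrative sketch (not part of the original lesson): flag revenues more than
# 1.96 standard deviations from their state's mean; the 1.96 cutoff is an arbitrary choice here.
stats = df.groupby('State')['Revenue'].agg(['mean', 'std'])
df = df.join(stats, on='State')
df['Outlier'] = (df['Revenue'] - df['mean']).abs() > 1.96 * df['std']
outliers = df[df['Outlier']]  # rows flagged as outliers under the Gaussian assumption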
###Output
_____no_output_____ |
Notebooks/Operadores.ipynb | ###Markdown
Operators in Python. Operators are used to do things with variables. You can perform operations just like on a calculator. Arithmetic operators: these are used to perform basic arithmetic operations.
###Code
5+3
4*6
###Output
_____no_output_____
###Markdown
Operations are evaluated in a fixed order: (* /) before (+ -). In case of a tie among (* /) or (+ -), they are evaluated from left to right. Parentheses change this order.
###Code
3+2*4
(3+2)*8
3*2/6
12/2*3
12/(2*3)
2+12/2*3
###Output
_____no_output_____
###Markdown
Squaring:
###Code
4**2
4**3
###Output
_____no_output_____
###Markdown
Square root:
###Code
4**(1/2)
64**0.5
###Output
_____no_output_____
###Markdown
Cube root...:
###Code
8**float(1/3)
###Output
_____no_output_____
###Markdown
Always double-check complex operations, or make the order explicit with parentheses. Example: calculating the height of a jump from a flight time of 0.493 s:
###Code
9.8*0.98**2/8 #incorrect
(9.8*0.298**2)/8
###Output
_____no_output_____
###Markdown
Integer part of a division (//). Rounds down.
###Code
4//3
4/3
###Output
_____no_output_____
###Markdown
Modulo, or remainder of a division (%).
###Code
4%3
###Output
_____no_output_____
###Markdown
Assignment operators. The most basic one is the **=** sign. It stores the value on the right of the equals sign into the variable on the left. **Do not confuse it with a comparison**, which is written with a double equals sign **==**.
###Code
a = 10
b = 2
c = a + b
print(c)
d = a**b
print(d)
###Output
100
###Markdown
When you want to modify the value of a variable with an operator, it can be done in two ways:
###Code
a = 10
a = a + 5 #adds 5 to the value the variable a already had
print(a)
###Output
15
###Markdown
Or in this other, more compact form.
###Code
b = 2
b += 3 #equivalent to b = b + 3
print(b)
###Output
5
###Markdown
Other assignment operators can also be used: +=, -=, *=, /=, **=, //=
###Code
a = 3
a *= 4
print(a)
b = 3
b **= 3
print(b)
###Output
27
###Markdown
You can also operate on text variables.
###Code
a = 'hola'
b = 'cara'
c = 'cola'
d = a + b + c
print(d)
###Output
holacaracola
###Markdown
It can be improved:
###Code
d = a + ' ' + b + c
print(d)
###Output
hola caracola
###Markdown
Operators on text strings do not behave the same way as with numbers:
###Code
a*3
###Output
_____no_output_____
###Markdown
Comparison operators. They perform a logical comparison and return a boolean value (True or False). The operators are: ==, !=, >, >=, <, <=
###Code
a = 3; b = 2; c = 3
a == b
a != b
a == c
###Output
_____no_output_____
###Markdown
WATCH OUT: a common mistake is to use = when you actually want to compare:
###Code
a=b #assigns the value of b to the variable a, it does not compare them.
###Output
_____no_output_____
###Markdown
You can also compare using boolean operators (and, or, not)
###Code
a=True; b=False; c=True
a and b
a and c
a or b
not a
not b
not(a and b)
not(a and c)
mayor_edad = 18
edad_persona = 15
edad_persona >= mayor_edad
###Output
_____no_output_____ |
02. Iris Classification.ipynb | ###Markdown
Iris classification with a neural network. Using a neural network to classify iris flowers takes roughly three steps: 1. Prepare the data 2. Build the network 3. Optimize the parameters. 1. Prepare the data
###Code
import tensorflow as tf
import numpy as np
###Output
_____no_output_____
###Markdown
1.1 Load the dataset. Read the iris dataset from sklearn's datasets module and get all of its input features and labels.
###Code
from sklearn import datasets
iris = datasets.load_iris()
x_data = iris.data
y_data = iris.target
###Output
_____no_output_____
###Markdown
1.2 Shuffle the dataset. By setting the same random seed, the shuffled input features and labels still correspond to each other one-to-one.
###Code
np.random.seed(666)
np.random.shuffle(x_data)
np.random.seed(666) # the random seed must be set again before each shuffle
np.random.shuffle(y_data)
tf.random.set_seed(666)
###Output
_____no_output_____
###Markdown
1.3 Generate the training and test sets. There are 150 samples: [0, 120] is the training set and [121, 150] is the test set, i.e. the training and test sets do not overlap.
###Code
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]
# Cast the data types, otherwise the matrix multiplication below raises an error due to mismatched dtypes
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)
###Output
_____no_output_____
###Markdown
1.4 Pair up (input features, labels) and read in a small batch at a time.
###Code
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
###Output
_____no_output_____
###Markdown
2. Build the network. This neural network has only an input layer (4 neurons) and an output layer (3 neurons), with no hidden layer; i.e. it is a 3-class classification problem with 4 input features. Randomly initialize the weights (w1) and bias (b1) from a distribution with mean 0 and standard deviation 0.1. w1 is a tensor with 4 rows and 3 columns, i.e. [number of input nodes, number of output nodes]; b1 must match the number of output nodes, i.e. [3].
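Written out, the forward pass and loss implemented in the following cells are
$$ \hat{y} = \mathrm{softmax}(x W_1 + b_1), \qquad \mathrm{loss} = \mathrm{mean}\big((y_{\mathrm{onehot}} - \hat{y})^2\big), $$
with $W_1 \in \mathbb{R}^{4 \times 3}$ and $b_1 \in \mathbb{R}^{3}$, i.e. the element-wise mean squared error between the one-hot label and the predicted probabilities.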
###Code
w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1))
b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1))
###Output
_____no_output_____
###Markdown
3. Optimize the parameters. Define the hyperparameters.
###Code
lr = 0.1 # learning rate of 0.1
epoch = 500 # iterate for 500 epochs
loss_all = 0 # 4 steps per epoch; loss_all records the sum of the 4 losses generated in those 4 steps
train_loss_results = [] # record each epoch's loss, used later to plot the loss curve
test_acc = [] # record each epoch's accuracy, used later to plot the acc curve
###Output
_____no_output_____
###Markdown
Iterate with nested loops. Inside the with block, compute the partial derivatives of the loss with respect to each trainable parameter, update those parameters, and finally display the current loss.
###Code
for i in range(epoch):
    for step, (x_train, y_train) in enumerate(train_db):
        with tf.GradientTape() as tape:
            # y = XW + b
            y = tf.matmul(x_train, w1) + b1
            # Pass the result through the softmax activation so the output is a probability distribution
            y = tf.nn.softmax(y)
            # Convert the labels to one-hot encoding, which makes computing the loss easier
            y_ = tf.one_hot(y_train, depth=3)
            # The loss function is defined as the mean squared error (MSE)
            loss = tf.reduce_mean(tf.square(y_ - y))
            # Accumulate the loss of each step, used later to compute the average loss
            loss_all += loss.numpy()
        # Compute the gradients of the loss with respect to each parameter
        grads = tape.gradient(loss, [w1, b1])
        # Gradient update
        # w1 = w1 - lr * w1_grads
        w1.assign_sub(lr * grads[0])
        # b1 = b1 - lr * b1_grads
        b1.assign_sub(lr * grads[1])
    # Print the loss information at every epoch
    print("Epoch {}, loss: {}".format(i, loss_all / 4))
    # Record the average loss of this epoch
    train_loss_results.append(loss_all / 4)
    # Reset loss_all to make the next epoch's computation easier
    loss_all = 0
    # Evaluate the training: compute the accuracy of a forward pass with the current parameters
    # Initialize the number of correctly predicted samples and the total number of samples
    total_correct, total_number = 0, 0
    for x_test, y_test in test_db:
        # Predict using the updated parameters
        y = tf.matmul(x_test, w1) + b1
        y = tf.nn.softmax(y)
        # Get the predicted class
        pred = tf.argmax(y, axis=1)
        # Keep the data types consistent for the comparison
        pred = tf.cast(pred, dtype=y_test.dtype)
        # Compare the predictions with the true labels
        correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32)
        # Count the total number of correct predictions
        correct = tf.reduce_sum(correct)
        total_correct += int(correct)
        total_number += x_test.shape[0]
    acc = total_correct / total_number
    test_acc.append(acc)
    print("accuracy:", acc)
    print('-----------------------------------')
###Output
Epoch 0, loss: 0.1989009529352188
accuracy: 0.5
-----------------------------------
Epoch 1, loss: 0.18588490411639214
accuracy: 0.6666666666666666
-----------------------------------
Epoch 2, loss: 0.17692162469029427
accuracy: 0.6666666666666666
-----------------------------------
Epoch 3, loss: 0.16934260353446007
accuracy: 0.6666666666666666
-----------------------------------
Epoch 4, loss: 0.16269876062870026
accuracy: 0.6666666666666666
-----------------------------------
Epoch 5, loss: 0.15691311284899712
accuracy: 0.6666666666666666
-----------------------------------
Epoch 6, loss: 0.15188119933009148
accuracy: 0.6666666666666666
-----------------------------------
Epoch 7, loss: 0.14749163016676903
accuracy: 0.6666666666666666
-----------------------------------
Epoch 8, loss: 0.14364120736718178
accuracy: 0.6666666666666666
-----------------------------------
Epoch 9, loss: 0.14024024456739426
accuracy: 0.6666666666666666
-----------------------------------
Epoch 10, loss: 0.13721336424350739
accuracy: 0.6666666666666666
-----------------------------------
Epoch 11, loss: 0.13449833542108536
accuracy: 0.6666666666666666
-----------------------------------
Epoch 12, loss: 0.13204422779381275
accuracy: 0.6666666666666666
-----------------------------------
Epoch 13, loss: 0.12980946153402328
accuracy: 0.6666666666666666
-----------------------------------
Epoch 14, loss: 0.12776010110974312
accuracy: 0.6666666666666666
-----------------------------------
Epoch 15, loss: 0.1258684080094099
accuracy: 0.6666666666666666
-----------------------------------
Epoch 16, loss: 0.1241115853190422
accuracy: 0.6666666666666666
-----------------------------------
Epoch 17, loss: 0.12247086502611637
accuracy: 0.6666666666666666
-----------------------------------
Epoch 18, loss: 0.12093068100512028
accuracy: 0.6666666666666666
-----------------------------------
Epoch 19, loss: 0.11947807855904102
accuracy: 0.6666666666666666
-----------------------------------
Epoch 20, loss: 0.11810222081840038
accuracy: 0.6666666666666666
-----------------------------------
Epoch 21, loss: 0.11679399944841862
accuracy: 0.6666666666666666
-----------------------------------
Epoch 22, loss: 0.11554572358727455
accuracy: 0.6666666666666666
-----------------------------------
Epoch 23, loss: 0.11435085535049438
accuracy: 0.6666666666666666
-----------------------------------
Epoch 24, loss: 0.11320381611585617
accuracy: 0.6666666666666666
-----------------------------------
Epoch 25, loss: 0.11209983751177788
accuracy: 0.6666666666666666
-----------------------------------
Epoch 26, loss: 0.1110348030924797
accuracy: 0.6666666666666666
-----------------------------------
Epoch 27, loss: 0.11000518314540386
accuracy: 0.6666666666666666
-----------------------------------
Epoch 28, loss: 0.10900786705315113
accuracy: 0.6666666666666666
-----------------------------------
Epoch 29, loss: 0.1080402061343193
accuracy: 0.6666666666666666
-----------------------------------
Epoch 30, loss: 0.10709983296692371
accuracy: 0.6666666666666666
-----------------------------------
Epoch 31, loss: 0.1061846911907196
accuracy: 0.6666666666666666
-----------------------------------
Epoch 32, loss: 0.10529298149049282
accuracy: 0.6666666666666666
-----------------------------------
Epoch 33, loss: 0.10442309081554413
accuracy: 0.6666666666666666
-----------------------------------
Epoch 34, loss: 0.10357359424233437
accuracy: 0.6666666666666666
-----------------------------------
Epoch 35, loss: 0.10274322889745235
accuracy: 0.6666666666666666
-----------------------------------
Epoch 36, loss: 0.10193088091909885
accuracy: 0.6666666666666666
-----------------------------------
Epoch 37, loss: 0.10113552585244179
accuracy: 0.6666666666666666
-----------------------------------
Epoch 38, loss: 0.10035625658929348
accuracy: 0.6666666666666666
-----------------------------------
Epoch 39, loss: 0.09959225729107857
accuracy: 0.6666666666666666
-----------------------------------
Epoch 40, loss: 0.09884278103709221
accuracy: 0.6666666666666666
-----------------------------------
Epoch 41, loss: 0.09810717031359673
accuracy: 0.6666666666666666
-----------------------------------
Epoch 42, loss: 0.09738478437066078
accuracy: 0.6666666666666666
-----------------------------------
Epoch 43, loss: 0.09667509980499744
accuracy: 0.6666666666666666
-----------------------------------
Epoch 44, loss: 0.09597758576273918
accuracy: 0.6666666666666666
-----------------------------------
Epoch 45, loss: 0.09529177658259869
accuracy: 0.6666666666666666
-----------------------------------
Epoch 46, loss: 0.09461724571883678
accuracy: 0.6666666666666666
-----------------------------------
Epoch 47, loss: 0.09395359642803669
accuracy: 0.6666666666666666
-----------------------------------
Epoch 48, loss: 0.09330045059323311
accuracy: 0.6666666666666666
-----------------------------------
Epoch 49, loss: 0.09265748411417007
accuracy: 0.7
-----------------------------------
Epoch 50, loss: 0.09202436544001102
accuracy: 0.7
-----------------------------------
Epoch 51, loss: 0.09140079282224178
accuracy: 0.7
-----------------------------------
Epoch 52, loss: 0.09078650176525116
accuracy: 0.7
-----------------------------------
Epoch 53, loss: 0.09018122591078281
accuracy: 0.7333333333333333
-----------------------------------
Epoch 54, loss: 0.0895847249776125
accuracy: 0.7333333333333333
-----------------------------------
Epoch 55, loss: 0.08899676613509655
accuracy: 0.7333333333333333
-----------------------------------
Epoch 56, loss: 0.08841713517904282
accuracy: 0.7333333333333333
-----------------------------------
Epoch 57, loss: 0.08784562721848488
accuracy: 0.7333333333333333
-----------------------------------
Epoch 58, loss: 0.08728203549981117
accuracy: 0.7333333333333333
-----------------------------------
Epoch 59, loss: 0.08672618307173252
accuracy: 0.7333333333333333
-----------------------------------
Epoch 60, loss: 0.08617790043354034
accuracy: 0.7333333333333333
-----------------------------------
Epoch 61, loss: 0.08563700318336487
accuracy: 0.7333333333333333
-----------------------------------
Epoch 62, loss: 0.0851033441722393
accuracy: 0.7333333333333333
-----------------------------------
Epoch 63, loss: 0.08457676880061626
accuracy: 0.7333333333333333
-----------------------------------
Epoch 64, loss: 0.08405712805688381
accuracy: 0.7666666666666667
-----------------------------------
Epoch 65, loss: 0.0835442766547203
accuracy: 0.7666666666666667
-----------------------------------
Epoch 66, loss: 0.08303808979690075
accuracy: 0.7666666666666667
-----------------------------------
Epoch 67, loss: 0.08253841660916805
accuracy: 0.7666666666666667
-----------------------------------
Epoch 68, loss: 0.08204514347016811
accuracy: 0.7666666666666667
-----------------------------------
Epoch 69, loss: 0.08155814744532108
accuracy: 0.7666666666666667
-----------------------------------
Epoch 70, loss: 0.081077316775918
accuracy: 0.8
-----------------------------------
Epoch 71, loss: 0.08060253039002419
accuracy: 0.8
-----------------------------------
Epoch 72, loss: 0.08013367280364037
accuracy: 0.8
-----------------------------------
Epoch 73, loss: 0.07967064715921879
accuracy: 0.8
-----------------------------------
Epoch 74, loss: 0.0792133528739214
accuracy: 0.8
-----------------------------------
Epoch 75, loss: 0.07876167446374893
accuracy: 0.8
-----------------------------------
Epoch 76, loss: 0.07831553183495998
accuracy: 0.8
-----------------------------------
Epoch 77, loss: 0.07787481136620045
accuracy: 0.8
-----------------------------------
Epoch 78, loss: 0.0774394404143095
accuracy: 0.8
-----------------------------------
Epoch 79, loss: 0.07700931373983622
accuracy: 0.8
-----------------------------------
Epoch 80, loss: 0.07658434379845858
accuracy: 0.8
-----------------------------------
Epoch 81, loss: 0.07616446260362864
accuracy: 0.8
-----------------------------------
Epoch 82, loss: 0.07574956677854061
accuracy: 0.8333333333333334
-----------------------------------
Epoch 83, loss: 0.07533958368003368
accuracy: 0.8333333333333334
-----------------------------------
Epoch 84, loss: 0.07493443042039871
###Markdown
Plot the curves of the loss function (loss) and the accuracy (acc)
###Code
import matplotlib.pyplot as plt
###Output
_____no_output_____
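###Markdown
Before plotting each curve on its own, a combined side-by-side view can be convenient. This cell is an added sketch (not part of the original notebook); it only assumes the `train_loss_results` and `test_acc` lists filled in during the training loop above.
###Code
# Added sketch: loss and accuracy side by side, using the lists from the training loop.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(train_loss_results)
ax1.set_xlabel("Epoch")
ax1.set_ylabel("Loss")
ax2.plot(test_acc)
ax2.set_xlabel("Epoch")
ax2.set_ylabel("Accuracy")
plt.tight_layout()
plt.show()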
###Markdown
Plot the loss curve
###Code
plt.title("Loss Function Curve")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.plot(train_loss_results, label="$Loss$")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Plot the accuracy (acc) curve
###Code
plt.title("Accuracy Curve")
plt.xlabel("Epoch")
plt.ylabel("Acc")
plt.plot(test_acc, label="$Accuracy$")
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/GTDB_release95/02_struo_version_db-create_benchmarking/01_benchmarking_UniRef50-90_db-create.ipynb | ###Markdown
Table of Contents
1 Goal
2 Var
3 Init
4 Load
5 n100: 5.1 Struo1 (5.1.1 Kraken/Bracken + UniRef50: 5.1.1.1 Config, 5.1.1.2 Run; 5.1.2 UniRef90: 5.1.2.1 Config, 5.1.2.2 Run); 5.2 Struo2 (5.2.1 Config, 5.2.2 Run)
6 n500: 6.1 Struo1 (6.1.1 Kraken/Bracken + UniRef50: 6.1.1.1 Config, 6.1.1.2 Run; 6.1.2 UniRef90: 6.1.2.1 Config, 6.1.2.2 Run); 6.2 Struo2 (6.2.1 Config, 6.2.2 Run)
7 n1000: 7.1 Struo1 (7.1.1 Kraken/Bracken + UniRef50: 7.1.1.1 Config, 7.1.1.2 Run; 7.1.2 UniRef90: 7.1.2.1 Config, 7.1.2.2 Run); 7.2 Struo2 (7.2.1 Config, 7.2.2 Run)
8 Summary: 8.1 CPU hours; 8.2 Genes annotated (8.2.1 Struo1, 8.2.2 Struo2)
9 sessionInfo
Goal
* Comparing how long Struo v1 & Struo v2 take to annotate both UniRef50 + UniRef90 sequences
Var
###Code
work_dir = '/ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/'
# GTDB metadata (1 genome per species)
metadata_file = '/ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/metadata_1per-GTDB-Spec_gte50comp-lt5cont_wtaxID_wPath.tsv'
# mapping
map_dir = '/ebio/abt3_projects/databases_no-backup/humann3/201901/utility_mapping/'
map_pfam_file = file.path(map_dir, 'map_pfam_uniref50.txt.gz')
map_eggnog_file = file.path(map_dir, 'map_eggnog_uniref50.txt.gz')
map_go_file = file.path(map_dir, 'map_go_uniref50.txt.gz')
# params
threads = 8
###Output
_____no_output_____
###Markdown
Init
###Code
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(tidytable)
library(LeyLabRMisc)
set.seed(6471)
setDTthreads(threads)
make_dir(work_dir)
df.dims()
###Output
Directory already exists: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/
###Markdown
Load
###Code
meta = Fread(metadata_file)
meta
###Output
_____no_output_____
###Markdown
n100
###Code
to_keep = sample(1:nrow(meta), 100)
meta_f = meta %>%
slice.(rows=to_keep)
meta_f
F = file.path(work_dir, 'n100', 'genome_meta_n100.tsv')
meta_f %>% write_table(F)
###Output
File written: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/genome_meta_n100.tsv
###Markdown
Struo1 Kraken/Bracken + UniRef50 Config
###Code
config_file = file.path(work_dir, 'n100', 'struo1', 'config_uniref50.yaml')
cat_file(config_file)
###Output
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/genome_meta_n100.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'ncbi_species_taxid'
taxonomy_col: 'ncbi_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/struo1/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Create
bracken: Create
humann2_bowtie2: Create
humann2_diamond: Create
# output database name
db_name: GTDB-custom
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
use_ancient: True
#-- if custom NCBI taxdump files (or just Skip) --#
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# `vsearch_all` = all genes clustered (including `humann2_nuc_seqs` & `humann2_prot_seqs`)
params:
bracken_build_kmer: 35
bracken_build_read_lens:
- 100
- 150
prodigal: ""
diamond_db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref50/uniref50_201901.dmnd
diamond_db_to_mem: True
diamond: --evalue 1e-3 --sensitive --query-cover 80 --id 50 --max-target-seqs 20 --block-size 3 --index-chunks 2
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
vsearch_all: Skip #--id 1.0 --strand both --qmask none --fasta_width 0
#-- If adding genes to humann2 database --#
# If you have nucleotid and/or protein gene sequences formatted for humann2,
# provide the file paths to the fasta files below (gzip'ed)
humann2_nuc_seqs: Skip
humann2_prot_seqs: Skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/Struo$ screen -L -S struo-bmk ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/struo1/config_uniref50.yaml 50 -F``` UniRef90 Config
###Code
config_file = file.path(work_dir, 'n100', 'struo1', 'config_uniref90.yaml')
cat_file(config_file)
###Output
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/genome_meta_n100.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'ncbi_species_taxid'
taxonomy_col: 'ncbi_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/struo1/uniref90/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Skip #Create
bracken: Skip #Create
humann2_bowtie2: Create
humann2_diamond: Create
# output database name
db_name: GTDB-custom
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
use_ancient: True
#-- if custom NCBI taxdump files (or just Skip) --#
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# `vsearch_all` = all genes clustered (including `humann2_nuc_seqs` & `humann2_prot_seqs`)
params:
bracken_build_kmer: 35
bracken_build_read_lens:
- 100
- 150
prodigal: ""
diamond_db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref90/uniref90_201901.dmnd
diamond_db_to_mem: True
diamond: --evalue 1e-3 --sensitive --query-cover 80 --id 90 --max-target-seqs 20 --block-size 3 --index-chunks 2
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
vsearch_all: Skip #--id 1.0 --strand both --qmask none --fasta_width 0
#-- If adding genes to humann2 database --#
# If you have nucleotid and/or protein gene sequences formatted for humann2,
# provide the file paths to the fasta files below (gzip'ed)
humann2_nuc_seqs: Skip
humann2_prot_seqs: Skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/Struo$ screen -L -S struo-bmk ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/struo1/config_uniref90.yaml 50 -F``` Struo2 Config
###Code
config_file = file.path(work_dir, 'n100', 'struo2', 'config_db-create.yaml')
cat_file(config_file)
###Output
#-- email notifications of pipeline success/failure (use "Skip" to deactivate) --#
email: [email protected]
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/genome_meta_n100.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
accession_col: 'accession'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'gtdb_taxid' # or 'ncbi_species_taxid'
taxonomy_col: 'gtdb_taxonomy' # or 'ncbi_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/struo2/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Create
bracken: Create
genes: Create
humann3_bowtie2: Create
humann3_diamond: Create
metaphlan3: Skip #Create
# Name of UniRef clustering (uniref90 or uniref50)
## uniref90 highly recommended
uniref_name: uniref90
# Name of the humann3 diamond database to be created
## This must match naming allowed by humann3
dmnd_name: uniref90_201901.dmnd
# Index mapping UniRef90 clusters to UniRef50 (saves time vs re-annotating)
## This is skipped if annotating with UniRef50
cluster_idx: /ebio/abt3_projects/databases_no-backup/uniref/2019.01/uniref50-90.pkl
#-- if custom NCBI taxdump files (or just Skip) --#
# Used for kraken taxonomy & metaphlan
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# for humann3, use either mmseqs or diamond (mmseqs gets priority if neither skipped)
# for humann3::mmseqs_search::run, --num-iterations must be >=2
params:
ionice: -c 3
bracken:
build_kmer: 35
build_read_lens:
- 100
- 150
genes:
prodigal: ""
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
mmseqs_cluster: --min-seq-id 0.9 -c 0.8
mmseqs_cluster_method: linclust # or "cluster", which is slower
humann3:
batches: 100
mmseqs_search:
db: /ebio/abt3_projects/databases_no-backup/mmseqs/UniRef90/2019_01/uniref90
run: -e 1e-3 --max-accept 1 --max-seqs 100 --num-iterations 2 --start-sens 1 --sens-steps 3 -s 6
diamond:
db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref90/uniref90_201901.dmnd
run: Skip #--evalue 1e-3 --query-cover 80 --id 90 --max-target-seqs 1 --block-size 4 --index-chunks 2
propagate_annotations: --min-cov 80 --min-pident 90
metaphlan3:
pkl: /ebio/abt3_projects/databases_no-backup/metaphlan3/201901/mpa_v30_CHOCOPhlAn_201901.pkl
fasta: /ebio/abt3_projects/databases_no-backup/metaphlan3/201901/mpa_v30_CHOCOPhlAn_201901.fna.bz2
species_specific_markers: --ext-abs-cutoff 10 --ext-rel-cutoff 0.1
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
name: Struo2_db-create
config: create
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/struo2_1$ screen -L -S struo2-bmk-n100 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n100/struo2/config_db-create.yaml 50 -F``` n500
###Code
#' selecting genomes
to_keep = sample(1:nrow(meta), 500)
meta_f = meta %>%
slice.(rows=to_keep)
meta_f
F = file.path(work_dir, 'n500')
make_dir(F)
F = file.path(work_dir, 'n500', 'genome_meta_n500.tsv')
meta_f %>% write_table(F)
###Output
Created directory: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500
File written: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/genome_meta_n500.tsv
###Markdown
Struo1 Kraken/Bracken + UniRef50 Config
###Code
config_file = file.path(work_dir, 'n500', 'struo1', 'config_uniref50.yaml')
cat_file(config_file)
###Output
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/genome_meta_n500.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'ncbi_species_taxid'
taxonomy_col: 'ncbi_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/struo1/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Create
bracken: Create
humann2_bowtie2: Create
humann2_diamond: Create
# output database name
db_name: GTDB-custom
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
use_ancient: True
#-- if custom NCBI taxdump files (or just Skip) --#
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# `vsearch_all` = all genes clustered (including `humann2_nuc_seqs` & `humann2_prot_seqs`)
params:
bracken_build_kmer: 35
bracken_build_read_lens:
- 100
- 150
prodigal: ""
diamond_db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref50/uniref50_201901.dmnd
diamond_db_to_mem: True
diamond: --evalue 1e-3 --sensitive --query-cover 80 --id 50 --max-target-seqs 20 --block-size 3 --index-chunks 2
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
vsearch_all: Skip #--id 1.0 --strand both --qmask none --fasta_width 0
#-- If adding genes to humann2 database --#
# If you have nucleotid and/or protein gene sequences formatted for humann2,
# provide the file paths to the fasta files below (gzip'ed)
humann2_nuc_seqs: Skip
humann2_prot_seqs: Skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/Struo$ screen -L -S struo1-bmk-n500 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/struo1/config_uniref50.yaml 50 -F``` UniRef90 Config
###Code
config_file = file.path(work_dir, 'n500', 'struo1', 'config_uniref90.yaml')
cat_file(config_file)
###Output
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/genome_meta_n500.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'gtdb_taxid'
taxonomy_col: 'gtdb_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/struo1/uniref90/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Skip #Create
bracken: Skip #Create
humann2_bowtie2: Create
humann2_diamond: Create
# output database name
db_name: GTDB-custom
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
use_ancient: True
#-- if custom NCBI taxdump files (or just Skip) --#
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# `vsearch_all` = all genes clustered (including `humann2_nuc_seqs` & `humann2_prot_seqs`)
params:
bracken_build_kmer: 35
bracken_build_read_lens:
- 100
- 150
prodigal: ""
diamond_db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref90/uniref90_201901.dmnd
diamond_db_to_mem: True
diamond: --evalue 1e-3 --sensitive --query-cover 80 --id 90 --max-target-seqs 20 --block-size 3 --index-chunks 2
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
vsearch_all: Skip #--id 1.0 --strand both --qmask none --fasta_width 0
#-- If adding genes to humann2 database --#
# If you have nucleotid and/or protein gene sequences formatted for humann2,
# provide the file paths to the fasta files below (gzip'ed)
humann2_nuc_seqs: Skip
humann2_prot_seqs: Skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/Struo$ screen -L -S struo1-bmk-n500 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/struo1/config_uniref90.yaml 50 -F``` Struo2 Config
###Code
config_file = file.path(work_dir, 'n500', 'struo2', 'config_db-create.yaml')
cat_file(config_file)
###Output
#-- email notifications of pipeline success/failure (use "Skip" to deactivate) --#
email: [email protected]
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/genome_meta_n500.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
accession_col: 'accession'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'gtdb_taxid' # or 'ncbi_species_taxid'
taxonomy_col: 'gtdb_taxonomy' # or 'ncbi_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/struo2/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Create
bracken: Create
genes: Create
humann3_bowtie2: Create
humann3_diamond: Create
metaphlan3: Skip #Create
# Name of UniRef clustering (uniref90 or uniref50)
## uniref90 highly recommended
uniref_name: uniref90
# Name of the humann3 diamond database to be created
## This must match naming allowed by humann3
dmnd_name: uniref90_201901.dmnd
# Index mapping UniRef90 clusters to UniRef50 (saves time vs re-annotating)
## This is skipped if annotating with UniRef50
cluster_idx: /ebio/abt3_projects/databases_no-backup/uniref/2019.01/uniref50-90.pkl
#-- if custom NCBI taxdump files (or just Skip) --#
# Used for kraken taxonomy & metaphlan
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# for humann3, use either mmseqs or diamond (mmseqs gets priority if neither skipped)
# for humann3::mmseqs_search::run, --num-iterations must be >=2
params:
ionice: -c 3
bracken:
build_kmer: 35
build_read_lens:
- 100
- 150
genes:
prodigal: ""
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
mmseqs_cluster: --min-seq-id 0.9 -c 0.8
mmseqs_cluster_method: linclust # or "cluster", which is slower
humann3:
batches: 100
mmseqs_search:
db: /ebio/abt3_projects/databases_no-backup/mmseqs/UniRef90/2019_01/uniref90
run: -e 1e-3 --max-accept 1 --max-seqs 100 --num-iterations 2 --start-sens 1 --sens-steps 3 -s 6
diamond:
db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref90/uniref90_201901.dmnd
run: Skip #--evalue 1e-3 --query-cover 80 --id 90 --max-target-seqs 1 --block-size 4 --index-chunks 2
propagate_annotations: --min-cov 80 --min-pident 90
metaphlan3:
pkl: /ebio/abt3_projects/databases_no-backup/metaphlan3/201901/mpa_v30_CHOCOPhlAn_201901.pkl
fasta: /ebio/abt3_projects/databases_no-backup/metaphlan3/201901/mpa_v30_CHOCOPhlAn_201901.fna.bz2
species_specific_markers: --ext-abs-cutoff 10 --ext-rel-cutoff 0.1
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
name: Struo2_db-create
config: create
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/struo2_1$ screen -L -S struo2-bmk-n500 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n500/struo2/config_db-create.yaml 50 -F``` n1000
###Code
to_keep = sample(1:nrow(meta), 1000)
meta_f = meta %>%
slice.(rows=to_keep)
meta_f
F = file.path(work_dir, 'n1000', 'genome_meta_n1000.tsv')
meta_f %>% write_table(F)
###Output
File written: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/genome_meta_n1000.tsv
###Markdown
Struo1 Kraken/Bracken + UniRef50 Config
###Code
file.path(work_dir, 'n1000', 'struo1', 'config_uniref50.yaml')
config_file = file.path(work_dir, 'n1000', 'struo1', 'config_uniref50.yaml')
cat_file(config_file)
###Output
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/genome_meta_n1000.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'gtdb_taxid'
taxonomy_col: 'gtdb_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/struo1/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Create
bracken: Create
humann2_bowtie2: Create
humann2_diamond: Create
# output database name
db_name: GTDB-custom
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
use_ancient: True
#-- if custom NCBI taxdump files (or just Skip) --#
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# `vsearch_all` = all genes clustered (including `humann2_nuc_seqs` & `humann2_prot_seqs`)
params:
bracken_build_kmer: 35
bracken_build_read_lens:
- 100
- 150
prodigal: ""
diamond_db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref50/uniref50_201901.dmnd
diamond_db_to_mem: True
diamond: --evalue 1e-3 --sensitive --query-cover 80 --id 50 --max-target-seqs 20 --block-size 3 --index-chunks 2
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
vsearch_all: Skip #--id 1.0 --strand both --qmask none --fasta_width 0
#-- If adding genes to humann2 database --#
# If you have nucleotid and/or protein gene sequences formatted for humann2,
# provide the file paths to the fasta files below (gzip'ed)
humann2_nuc_seqs: Skip
humann2_prot_seqs: Skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/Struo$ screen -L -S struo1-bmk-n1k ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/struo1/config_uniref50.yaml 50 -F``` UniRef90 Config
###Code
config_file = file.path(work_dir, 'n1000', 'struo1', 'config_uniref90.yaml')
cat_file(config_file)
###Output
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/genome_meta_n1000.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'gtdb_taxid'
taxonomy_col: 'gtdb_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/struo1/uniref90/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Skip #Create
bracken: Skip #Create
humann2_bowtie2: Create
humann2_diamond: Create
# output database name
db_name: GTDB-custom
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
use_ancient: False
#-- if custom NCBI taxdump files (or just Skip) --#
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# `vsearch_all` = all genes clustered (including `humann2_nuc_seqs` & `humann2_prot_seqs`)
params:
bracken_build_kmer: 35
bracken_build_read_lens:
- 100
- 150
prodigal: ""
diamond_db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref90/uniref90_201901.dmnd
diamond_db_to_mem: True
diamond: --evalue 1e-3 --sensitive --query-cover 80 --id 90 --max-target-seqs 20 --block-size 3 --index-chunks 2
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
vsearch_all: Skip #--id 1.0 --strand both --qmask none --fasta_width 0
#-- If adding genes to humann2 database --#
# If you have nucleotid and/or protein gene sequences formatted for humann2,
# provide the file paths to the fasta files below (gzip'ed)
humann2_nuc_seqs: Skip
humann2_prot_seqs: Skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/Struo$ screen -L -S struo1-bmk-n1000 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/struo1/config_uniref90.yaml 50 -F``` Struo2 Config
###Code
config_file = file.path(work_dir, 'n1000', 'struo2', 'config_db-create.yaml')
cat_file(config_file)
###Output
#-- email notifications of pipeline success/failure (use "Skip" to deactivate) --#
email: [email protected]
#-- I/O --#
# file listing samples and associated data
samples_file: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/genome_meta_n1000.tsv
## column names in samples table
samples_col: 'ncbi_organism_name'
accession_col: 'accession'
fasta_file_path_col: 'fasta_file_path'
taxID_col: 'gtdb_taxid' # or 'ncbi_species_taxid'
taxonomy_col: 'gtdb_taxonomy' # or 'ncbi_taxonomy'
# output location
output_dir: /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/struo2/
# temporary file directory (your username will be added automatically)
tmp_dir: /ebio/abt3_scratch/
#-- databases to create --#
# Replace "Create" with "Skip" to skip creation of any of these
# Note that braken relies on the kraken2 database
databases:
kraken2: Create
bracken: Create
genes: Create
humann3_bowtie2: Create
humann3_diamond: Create
metaphlan3: Skip #Create
# Name of UniRef clustering (uniref90 or uniref50)
## uniref90 highly recommended
uniref_name: uniref90
# Name of the humann3 diamond database to be created
## This must match naming allowed by humann3
dmnd_name: uniref90_201901.dmnd
# Index mapping UniRef90 clusters to UniRef50 (saves time vs re-annotating)
## This is skipped if annotating with UniRef50
cluster_idx: /ebio/abt3_projects/databases_no-backup/uniref/2019.01/uniref50-90.pkl
#-- if custom NCBI taxdump files (or just Skip) --#
# Used for kraken taxonomy & metaphlan
names_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/names.dmp
nodes_dmp: /ebio/abt3_projects/databases_no-backup/GTDB/release95/taxdump/nodes.dmp
#-- keep intermediate files required for re-creating DBs (eg., w/ more genomes) --#
# If "True", the intermediate files are saved to `output_dir`
# Else, the intermediate files are temporarily stored in `temp_folder`
keep_intermediate: True
#-- software parameters --#
# `vsearch_per_genome` = per-genome gene clustering
# for humann3, use either mmseqs or diamond (mmseqs gets priority if neither skipped)
# for humann3::mmseqs_search::run, --num-iterations must be >=2
params:
ionice: -c 3
bracken:
build_kmer: 35
build_read_lens:
- 100
- 150
genes:
prodigal: ""
vsearch_per_genome: --id 0.97 --strand both --qmask none --fasta_width 0
mmseqs_cluster: --min-seq-id 0.9 -c 0.8
mmseqs_cluster_method: linclust # or "cluster", which is slower
humann3:
batches: 100
mmseqs_search:
db: /ebio/abt3_projects/databases_no-backup/mmseqs/UniRef90/2019_01/uniref90
run: -e 1e-3 --max-accept 1 --max-seqs 100 --num-iterations 2 --start-sens 1 --sens-steps 3 -s 6
diamond:
db: /ebio/abt3_projects/databases_no-backup/humann3/201901/uniref90/uniref90_201901.dmnd
run: Skip #--evalue 1e-3 --query-cover 80 --id 90 --max-target-seqs 1 --block-size 4 --index-chunks 2
propagate_annotations: --min-cov 80 --min-pident 90
metaphlan3:
pkl: /ebio/abt3_projects/databases_no-backup/metaphlan3/201901/mpa_v30_CHOCOPhlAn_201901.pkl
fasta: /ebio/abt3_projects/databases_no-backup/metaphlan3/201901/mpa_v30_CHOCOPhlAn_201901.fna.bz2
species_specific_markers: --ext-abs-cutoff 10 --ext-rel-cutoff 0.1
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: ./bin/scripts/
name: Struo2_db-create
config: create
###Markdown
Run```(snakemake) @ rick:/ebio/abt3_projects/databases_no-backup/bin/struo2_1$ screen -L -S struo2-bmk-n1k ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/GTDB/release95/Struo/benchmarking/db_create/UniRef50-90/n1000/struo2/config_db-create.yaml 50 -F``` Summary CPU hours
###Code
#' read in a single benchmark file
read_bmk = function(F){
df = read.delim(F, sep='\t')
df$file = basename(F)
df$rule = basename(dirname(F))
return(df)
}
#' listing & reading in all benchmark files from a base path
read_bmks = function(F){
df = list_files(F, '*.txt') %>%
lapply(read_bmk) %>%
do.call(rbind, .)
if(grepl('uniref[0-9]+/benchmarks', F)){
df$dataset = F %>% dirname %>% dirname %>% dirname %>% basename
df$pipeline = F %>% dirname %>% dirname %>% basename
} else {
df$dataset = F %>% dirname %>% dirname %>% basename
df$pipeline = F %>% dirname %>% basename
}
return(df)
}
# reading in all benchmark files
L = list(
file.path(work_dir, 'n100', 'struo1', 'benchmarks'),
file.path(work_dir, 'n100', 'struo1', 'uniref90', 'benchmarks'),
file.path(work_dir, 'n500', 'struo1', 'benchmarks'),
file.path(work_dir, 'n500', 'struo1', 'uniref90', 'benchmarks'),
file.path(work_dir, 'n1000', 'struo1', 'benchmarks'),
file.path(work_dir, 'n1000', 'struo1', 'uniref90', 'benchmarks'),
file.path(work_dir, 'n100', 'struo2', 'benchmarks'),
file.path(work_dir, 'n500', 'struo2', 'benchmarks'),
file.path(work_dir, 'n1000', 'struo2', 'benchmarks')
)
bmk = L %>%
lapply(read_bmks) %>%
do.call(rbind, .)
bmk
# records per dataset
bmk %>%
group_by(dataset, pipeline) %>%
summarize(n = n(), .groups='drop')
#' cpu hours
bmk_s = bmk %>%
group_by(dataset, pipeline) %>%
summarize(cpu_hours = sum(s) / (60 * 60),
.groups='drop')
df.dims(20)
bmk_s %>% arrange(dataset, pipeline)
df.dims()
#' cpu hours: plotting
p = bmk_s %>%
mutate(pipeline = gsub('^s', 'S', pipeline),
dataset = gsub('^n', '', dataset) %>% as.Num) %>%
ggplot(aes(dataset, cpu_hours, color=pipeline)) +
geom_smooth(method='lm', level = 0.9, alpha = 0.2, size=0.5, linetype='dashed') +
geom_point() +
scale_color_discrete('Struo\nVersion') +
scale_x_continuous(breaks=c(100, 500, 1000)) +
labs(x='No. of genomes', y='CPU hours') +
theme_bw()
p.dims(5,3)
plot(p)
# linear model
bmk_s %>%
filter(pipeline == 'struo1') %>%
mutate(dataset = gsub('^n', '', dataset) %>% as.Num) %>%
lm(cpu_hours ~ dataset, data=.) %>%
summary
# time for all GTDBr95 genomes (n = 30989)
45.68 + 2.44119 * 30989
# linear model
bmk_s %>%
filter(pipeline == 'struo2') %>%
mutate(dataset = gsub('^n', '', dataset) %>% as.Num) %>%
lm(cpu_hours ~ dataset, data=.) %>%
summary
# time for all GTDBr95 genomes (n = 30989)
46.83 + 0.66850 * 30989
###Output
_____no_output_____
###Markdown
Genes annotated Struo1
###Code
F = file.path(work_dir, 'n100', 'struo1', 'humann2', 'GTDB-custom', 'all_genes_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n100', 'struo1', 'uniref90', 'humann2', 'GTDB-custom', 'all_genes_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n500', 'struo1', 'humann2', 'GTDB-custom', 'all_genes_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n500', 'struo1', 'uniref90', 'humann2', 'GTDB-custom', 'all_genes_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n1000', 'struo1', 'humann2', 'GTDB-custom', 'all_genes_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n1000', 'struo1', 'uniref90', 'humann2', 'GTDB-custom', 'all_genes_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
###Output
2227466
###Markdown
Struo2
###Code
F = file.path(work_dir, 'n100', 'struo2', 'humann3', 'uniref90', 'genome_reps_filt_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n500', 'struo2', 'humann3', 'uniref90', 'genome_reps_filt_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
F = file.path(work_dir, 'n1000', 'struo2', 'humann3', 'uniref90', 'genome_reps_filt_annot.faa.gz')
cmd = glue::glue("gunzip -c {F} | grep -c '>'", F=F)
bash_job(cmd, conda_env = 'base')
# formatting
df = data.frame(n_genomes = c(100, 500, 1000),
Struo1 = c(224221, 1109346, 2227466),
Struo2 = c(228222, 1130013, 2270402)) %>%
gather(version, genes_annot, -n_genomes)
df
# plotting
p = df %>%
mutate(n_genomes = n_genomes %>% as.character %>% reorder(n_genomes %>% as.Num)) %>%
ggplot(aes(n_genomes, genes_annot, fill=version)) +
geom_bar(stat='identity', position='dodge') +
scale_fill_discrete('Struo\nversion') +
labs(y='No. of annotated genes', x='No. of genomes') +
theme_bw()
p.dims(4,2.2)
plot(p)
###Output
_____no_output_____
###Markdown
sessionInfo
###Code
pipelineInfo('/ebio/abt3_projects/databases_no-backup/bin/Struo/')
pipelineInfo('/ebio/abt3_projects/databases_no-backup/bin/struo2_1')
sessionInfo()
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/final_plots_for_introduction-checkpoint.ipynb | ###Markdown
spectral standards
###Code
#spectral standards
regions=[[1.246, 1.295],[1.15, 1.20], [1.62,1.67], [1.56, 1.61], [1.38, 1.43]]
st_spts=[splat.typeToNum(x) for x in np.arange(15, 40)]
standards=[splat.getStandard(x) for x in st_spts]
for s in standards: s.normalize()
minima = 15.0
maxima = 40.0
splt.plotSpectrum(standards, figsize=(8, 12), bands=regions,
xrange=[1.1, 1.7], yrange=[0.0, 4.0]
, ylabel='Normalized Flux +constant',
bandalpha=0.2,bandlabels=['J-cont', '$H_2O-1$', '$CH_4$', 'H-cont',
'$H_2O-2$'],
legendLocation='outside',
#features=['H2O', 'CH4','TiO', 'VO', 'FeH', 'H2', 'KI', 'NaI'],
bandlabelpositions=['top', 'top', 'top', 'top', 'top'], fontsize=38,
#alpha=np.arange(15.0, 40.0)/40.0,
labels=st_spts, colors=wisps.Annotator.color_from_spts(st_spts, cmap='viridis'),
alpha=14.0,
s=14.0,
fontscale = 1.0,
stack=0.1,
grid=False,
filename=wisps.OUTPUT_FIGURES+'/standards.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
spex sample
###Code
spex_data=wisps.spex_sample_ids(stype='spex_sample', from_file=True)
fig, (ax, ax1)=plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(8,8))
df=spex_data[['Spts', 'Snr']]
df.Snr=np.log10(df.Snr)
df=df[df.Snr.between(0.01, 5.0)]
ax.hist2d(x=df.Spts,y=df.Snr, bins=10, cmap=wisps.MYCOLORMAP)
ax1.hist(spex_data.Spts, color='#AAAAAA')
ax1.set_xticks([17.0, 20.0, 25.0, 30.0, 35.0, 39.0])
ax1.set_xticklabels(['M.7','L0.0', 'L5.0', 'T0.0', 'T5.0', 'T9.0'], fontdict={'fontsize': 18,
'fontweight' : 18})
ax1.set_xticks(np.arange(17.0, 39.0, 1), minor=True)
ax1.set_yticks(np.arange(0.0, 800, 50), minor=True)
ax.set_yticks(np.arange(0.4, 3.4, 0.1), minor=True)
ax1.set_xlabel('Spectral Type', fontsize=18)
ax.set_ylabel('Log Snr', fontsize=18)
ax1.set_ylabel('Number', fontsize=18)
ax.yaxis.grid(True)
plt.savefig(wisps.OUTPUT_FIGURES+'/spexsample.pdf', bbox_inches='tight')
df=wisps.datasets['aegis_cosmos'].replace(np.inf, np.nan).dropna(how='any').reindex()
fig, ax=plt.subplots()
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
sns.distplot(df.f_test, ax=ax)
ax.set_xlabel('F(x)')
plt.savefig(wisps.OUTPUT_FIGURES+'/snr_f_dist.pdf', bbox_inches='tight')
len(spex_data)
###Output
_____no_output_____
###Markdown
Filters
###Code
import splat.photometry as sphot
t0=splat.Spectrum(splat.STDS_DWARF_SPEX_KEYS['T0.0'])
f140=sphot.filterProfile('NICMOS F140W')
f160=sphot.filterProfile('NICMOS F160W')
f110=sphot.filterProfile('NICMOS F110W')
j_filt=sphot.filterProfile('2MASS J')
h_filt=sphot.filterProfile('2MASS H')
t0.scale(np.nanmax(f140[1]))
t0.shortname
cmap=plt.cm.viridis
fig, ax=plt.subplots(figsize=(8, 6))
#l1=ax.plot(t0.wave, t0.flux, c='k')
l2=ax.plot( f160[0], 0.1+f160[1], c=cmap(0.3), alpha=0.8, ms=2, marker='*')
l3=ax.plot( f140[0], 0.4+f140[1], c=cmap(.5), alpha=0.8, ms=2, marker='.')
l4=ax.plot( f110[0], 0.7+f110[1], c=cmap(1.0), alpha=0.8, ms=2, marker='s')
l5=ax.plot( j_filt[0], 1.0+j_filt[1], c=cmap(1.5), linestyle='--', ms=2, alpha=1)
ax.plot(h_filt[0], 1.3+h_filt[1], c='#111111', linestyle='--', alpha=1)
ax.set_xlim([1.1, 1.75])
ax.set_xlabel('Wavelength ( $\mu m$)', fontsize=18)
ax.set_ylabel('Sensitivity+c', fontsize=18)
ax.set_xticks(np.arange(1.1, 1.7, 0.05), minor=True)
plt.legend(['F160W', 'F140W','F110W','2MASS J ', '2MASS H'], bbox_to_anchor=(1., .5),fontsize=18)
plt.savefig(wisps.OUTPUT_FIGURES+'/filter_profiles.pdf', bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Sky positions
###Code
t=pd.read_csv(wisps.OUTPUT_FILES+'/observation_log.csv').drop_duplicates(subset='POINTING')
t_wisp=t[t.POINTING.str.contains('wisp*')]
t_hst3d=t[t.POINTING.str.contains('wisp*').apply(lambda x: not x)]
from astropy.coordinates import SkyCoord
import astropy.units as u
coords1=SkyCoord(ra=t_wisp['RA (deg)'],dec=t_wisp['DEC(deg)'], unit=(u.deg, u.deg))
coords2=SkyCoord(ra=t_hst3d['RA (deg)'],dec=t_hst3d['DEC(deg)'], unit=(u.deg, u.deg))
splt.plotMap(coords1, coords2, galactic = False, legend=['WISP', 'HST-3D'], color=['#2ECC40', '#0074D9'],
size=[30, 40], alpha=[1.0, 1.0], filename=wisps.OUTPUT_FIGURES+'/fields_skymap.pdf')
###Output
_____no_output_____
###Markdown
This is an illustration of the modified SNR
###Code
sp=wisps.Spectrum(filename='Par32_BEAM_79A')
sp=sp.splat_spectrum
sp.normalize()
#flag=sp.isEmpty()
snr1=np.array(sp.flux/sp.noise)
snr=snr1[~np.isnan(snr1)]
xgrid=np.linspace(np.nanmin(snr), np.nanmax(snr), len(sp.wave))
cdf=wisps.kde_statsmodels_m(snr, xgrid)
#sp1=wisps.Source(name='aegis-03-G141_17053')
#sp2=wisps.Source(name='Par32_BEAM_79A')
sp1=wisps.Source(name='goodss-01-G141_45889')
sp2=wisps.Source(name='goodsn-24-G141_21552')
fig, (ax2, ax1)=plt.subplots(2,1, figsize=(8, 10), sharex=True, sharey=True)
ax1.set_yticks([])
ax1.set_xlim([1.0, 1.65])
ax1.set_ylim([-0.1, 1.8])
ax1.step(sp1.wave, sp1.flux, c='#111111')
ax1.step(sp1.wave, sp1.sensitivity_curve+.4, c='#AAAAAA', linestyle='-' )
ax1.plot(sp1.wave, (sp1.flux*sp1.sensitivity_curve), c='#0074D9')
#ax1.text(1.1, 1.3, sp1.shortname.upper(), size=15, color='#B10DC9')
ax1.set_title(sp1.shortname.upper())
ax2.set_xlim([1.0, 1.65])
ax2.set_ylim([-0.1, 1.8])
ax2.step(sp2.wave, sp2.flux, c='#111111')
a1=ax2.plot(sp2.wave, sp2.sensitivity_curve+.4, c='#AAAAAA', linestyle='-')
a2=ax2.step(sp2.wave, (sp2.flux*sp2.sensitivity_curve), c='#0074D9')
#plt.
ax1.set_xlabel('Wavelength (micron)', fontsize=18)
ax2.set_xlabel('Wavelength (micron)', fontsize=18)
ax1.tick_params(axis='both', which='major', labelsize=15)
ax2.tick_params(axis='both', which='major', labelsize=15)
ax2.set_title(sp2.shortname.upper())
#ax2.text(1.1, 1.4, sp2.shortname.upper(), size=15, color='#B10DC9')
ax1.set_ylabel('Flux+ Offset', fontsize=18)
#plt.legend( ['Spectrum/Sensitivity', 'Sensitivity', 'Spectrum'], bbox_to_anchor=(0.3, 0.5), fontsize=18)
ax2.legend(['spectrum/sensitivity', 'sensitivity', 'spectrum'], loc='best')
plt.tight_layout()
plt.show()
fig.savefig(wisps.OUTPUT_FIGURES+'/sensitivity_illustration.pdf', bbox_inches='tight', fontsize=16)
fig, (ax1, ax)=plt.subplots(nrows=2, figsize=(8, 8))
ax1.plot(sp1.wave, sp1.flux/sp1.noise, c='#111111')
snratio=sp1.flux/sp1.noise
norm=np.nanmax(snratio)
sn=np.array(sp1.flux/sp1.noise)
sn=sn[~np.isnan(sn)]
#ax1.step(sp1.wave, snratio/norm+1.0)
#ax1.step(sp1.wave, sp1.noise, c='#39CCCC')
ax1.set_xlim([1.0, 1.65])
ax1.set_ylim([-0.1, 52.8])
ax1.set_xlabel('Wavelength (micron)', fontsize=18)
ax1.set_ylabel('Fux/Noise', fontsize=18)
ax1.tick_params(axis='both', which='major', labelsize=15)
ax1.text(1.45,49, 'median SNR: {}'.format(round(np.nanmedian(sn))), size=15)
ax1.text(1.45,45, 'adopted SNR: {}'.format(round(sp1.cdf_snr)), size=15)
ax1.set_title(sp1.shortname.upper(), fontsize=14)
ax.set_xscale('log')
xgrid=np.linspace(np.nanmin(sn), np.nanmax(sn), len(sp1.wave))
#ax.axhline(0.9, C='#FFDC00')
sel=np.where((0.8>sp1._snr_histogram) &(sp1._snr_histogram <0.9))[0]
cdf_snr=xgrid[sel[-1]]
ax.plot(xgrid, sp1._snr_histogram, c='#111111')
ax.fill_between( xgrid, np.zeros(len(xgrid)), sp1._snr_histogram,
where=sp1._snr_histogram>=np.zeros(len(xgrid)), facecolor='gold')
ax.axvline(cdf_snr, C='#B10DC9')
ax.axvline(np.nanmedian(sn), c='#111111', linestyle='--')
ax.set_ylabel('SNR CDF', fontsize=18)
ax.set_xlabel('SNR', fontsize=18)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.set_xticks([1.0, round(np.nanmedian(sn)), 10, round(cdf_snr), 100.0])
ax.set_xticklabels([1.0,'Median', 10, 'Adopted' , 100.0])
ax.tick_params(axis='x', colors='k', tickdir='inout', labelbottom='on')
for tick in ax.get_xticklabels():
tick.set_rotation(90)
plt.tight_layout()
fig.savefig(wisps.OUTPUT_FIGURES+'/cdf_snr_illustration.pdf', bbox_inches='tight')
###Output
_____no_output_____
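###Markdown
The cells above lean on wisps/splat internals. As a rough standalone illustration (an addition using synthetic numbers, not real spectra), the "adopted" SNR is essentially the per-pixel SNR value at a high quantile (roughly 0.8-0.9) of the SNR distribution, which the notebook reads off a kernel-smoothed CDF:
###Code
# Added sketch: approximate the CDF-based ("adopted") SNR with plain NumPy.
import numpy as np

def adopted_snr(snr, quantile=0.85):
    # keep finite per-pixel SNR values and return the chosen high quantile
    snr = np.asarray(snr, dtype=float)
    snr = snr[np.isfinite(snr)]
    return np.quantile(snr, quantile)

# synthetic per-pixel SNR values standing in for flux/noise of one spectrum
rng = np.random.default_rng(0)
fake_snr = rng.gamma(shape=2.0, scale=5.0, size=1000)
np.nanmedian(fake_snr), adopted_snr(fake_snr)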
###Markdown
Results: list of candidates
###Code
import astropy.coordinates as astro_coor
sources=pd.read_pickle(wisps.OUTPUT_FILES+'/candidates.pkl')
coo=np.array([[s.coords.cartesian.x.value,
s.coords.cartesian.y.value,
s.coords.cartesian.z.value]
for s in sources ])
#galacto-centric coords
galoc = astro_coor.Galactocentric(x=coo[:,0] * u.pc,
y=coo[:,1] * u.pc,
z=coo[:,2] * u.pc,
z_sun=27 * u.pc, galcen_distance=8.3 * u.pc)
radial=np.sqrt(galoc.cartesian.x.value**2+galoc.cartesian.y.value**2)*u.pc
spts=[splat.typeToNum(s.spectral_type) for s in sources]
#galoc.cartesian.x
fig, ax=plt.subplots(figsize=(8,6))
c=ax.scatter(radial.value, galoc.cartesian.z.value , c=spts, cmap='viridis')
#c=ax.scatter(radial.value, galoc.cartesian.z.value , c=spts, cmap='Purples')
ax.set_xlabel('R (pc)', size=19)
ax.set_ylabel('Z (pc)', size=19)
cbar=fig.colorbar(c)
cbar.set_label('Spectral Type', size=19)
cbar.ax.set_yticklabels(['L0', 'L5', 'T0', 'T5'], fontsize=15)
ax.tick_params(axis='both', which='major', labelsize=15)
#plt.title('Galatic Positions of UCDs', size=18)
ax.set_xlim([0, 3000])
ax.set_ylim([0, 3000])
ax.set_xticks(np.arange(0, 3500, 100), minor=True)
ax.set_yticks(np.arange(-3000, 2500, 100), minor=True)
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
#ax.set_xticks(np.arange(0, 3500, 500), minor=True)
plt.savefig(wisps.OUTPUT_FIGURES+'/distance_positions.pdf')
#wisps.COMBINED_PHOTO_SPECTRO_DATA
img=wisps.REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/par1/hlsp_wisp_hst_wfc3_par1-80mas_f140w_v6.2_drz.fits'
spc_img=wisps.REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/par1/hlsp_wisp_hst_wfc3_par1-80mas_g141_v6.2_drz.fits'
from astropy.io import fits
from astropy.visualization import ZScaleInterval
fig, ax=plt.subplots(ncols=2, figsize=(10, 4))
d0=fits.open(img)[1].data
d1=fits.open(spc_img)[1].data
vmin0, vmax0=ZScaleInterval().get_limits(d0)
grid0=np.mgrid[0:d0.shape[0]:1, 0:d0.shape[1]:1]
vmin1, vmax1=ZScaleInterval().get_limits(d1)
grid1=np.mgrid[0:d1.shape[0]:1, 0:d1.shape[1]:1]
ax[0].pcolormesh(grid0[0], grid0[1], d0, cmap='Greys',
vmin=vmin0, vmax=vmax0, rasterized=True, alpha=1.0)
ax[1].pcolormesh(grid1[0], grid1[1], d1, cmap='Greys',
vmin=vmin1, vmax=vmax1, rasterized=True, alpha=1.0)
plt.tight_layout()
plt.savefig(wisps.OUTPUT_FIGURES+'/par1.pdf')
#f=fits.open(spc_img)[0]
#f.header
###Output
_____no_output_____ |
asx_stock_returns_steps_(1).ipynb | ###Markdown
In this notebook we will: * Download prices * Calculate returns * Calculate the mean and standard deviation of returns. Let's load the modules first.
###Code
pip install pandas-datareader
import pandas as pd
import matplotlib.pyplot as plt
from pandas_datareader import data as pdr
###Output
_____no_output_____
###Markdown
Workaround: Yahoo Finance made a change that causes an error with pandas-datareader, so we override it with yfinance.
###Code
!pip install yfinance
import yfinance as yfin
yfin.pdr_override()
###Output
Collecting yfinance
Downloading yfinance-0.1.63.tar.gz (26 kB)
Requirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.1.5)
Requirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.19.5)
Requirement already satisfied: requests>=2.20 in /usr/local/lib/python3.7/dist-packages (from yfinance) (2.23.0)
Requirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance) (0.0.9)
Collecting lxml>=4.5.1
Downloading lxml-4.6.3-cp37-cp37m-manylinux2014_x86_64.whl (6.3 MB)
[K |████████████████████████████████| 6.3 MB 4.2 MB/s
[?25hRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24->yfinance) (2.8.2)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24->yfinance) (2018.9)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=0.24->yfinance) (1.15.0)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (2021.5.30)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (2.10)
Building wheels for collected packages: yfinance
Building wheel for yfinance (setup.py) ... [?25l[?25hdone
Created wheel for yfinance: filename=yfinance-0.1.63-py2.py3-none-any.whl size=23918 sha256=d3bfec100e1e9d32ecff317036f13a1b8f66f66a6dc156ce6e9ee39dd09a7207
Stored in directory: /root/.cache/pip/wheels/fe/87/8b/7ec24486e001d3926537f5f7801f57a74d181be25b11157983
Successfully built yfinance
Installing collected packages: lxml, yfinance
Attempting uninstall: lxml
Found existing installation: lxml 4.2.6
Uninstalling lxml-4.2.6:
Successfully uninstalled lxml-4.2.6
Successfully installed lxml-4.6.3 yfinance-0.1.63
###Markdown
Step 1: Specify date range for analysis. Here we begin by defining the start and end dates as YYYY-MM-DD strings.
###Code
help(pdr)
start = "2009-01-01"
end = "2021-09-15"
help(pdr.get_data_yahoo)
###Output
Help on function download in module yfinance.multi:
download(tickers, start=None, end=None, actions=False, threads=True, group_by='column', auto_adjust=False, back_adjust=False, progress=True, period='max', show_errors=True, interval='1d', prepost=False, proxy=None, rounding=False, **kwargs)
Download yahoo tickers
:Parameters:
tickers : str, list
List of tickers to download
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or _datetime.
Default is now
group_by : str
Group by 'ticker' or 'column' (default)
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is False
actions: bool
Download dividend + stock splits data. Default is False
threads: bool / int
How many threads to use for mass downloading. Default is True
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Optional. Round values to 2 decimal places?
show_errors: bool
Optional. Doesn't print errors if True
###Markdown
Step 2: Select the stocks/tickers you would like to analyse. For Australian stocks, Yahoo tickers require '.AX' to be specified at the end of the ticker symbol. For other tickers, use the search bar in Yahoo Finance.
###Code
tickers = ["NAB.AX"]
###Output
_____no_output_____
###Markdown
Step 3: call the Pandas_Datareader DataReader module
###Code
df = pdr.get_data_yahoo(tickers,start,end)
df.tail
df.head
###Output
_____no_output_____
###Markdown
Step 4: Understanding the pandas dataframe structure. A good way to quickly understand how a pandas dataframe is structured is to look at the index and the columns.
###Code
df.index
df.columns
###Output
_____no_output_____
###Markdown
Step 5: Access useful attributes easily. We are interested in the closing price information for all of these stocks.
###Code
df.Close
Close=df['Close']
Close
###Output
_____no_output_____
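###Markdown
The introduction promised returns as well as their mean and standard deviation. The cell below is an added sketch of that calculation; it assumes the `Close` object created in the previous step.
###Code
# Added sketch: simple daily returns and their summary statistics.
returns = Close.pct_change().dropna()
returns.mean(), returns.std()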
###Markdown
Step 6: Gain insights quickly with pandas describe method
###Code
Close.describe()
Close.describe(percentiles=[.1, .5, .9])
###Output
_____no_output_____
###Markdown
Step 7: Plot and save
###Code
Close.plot()
Close.plot(figsize=(20,10))
Close.plot(figsize=(12,7))
plt.savefig("nab.png")
tickers = ['NAB.AX', 'WBC.AX', 'ANZ.AX', 'CBA.AX']
df = pdr.get_data_yahoo(tickers,start,end)
df.tail(10)
df.index, df.columns
Close=df['Close']
Close
Close.describe(percentiles=[.1, .5, .9])
Close.plot(figsize=(12,7))
plt.savefig("aussie_banks.png")
###Output
_____no_output_____ |
notebooks/Week3Municipalities.ipynb | ###Markdown
Correlation analysis of case/death rate with multiple variables at the municipality level (admin2). This notebook reads the collection of variables at the municipality level collected this week ([variables](Week3_variables_municipalities.ipynb), [population density](Week3_population_density_states.ipynb) and [comorbidities](Week3_comorbidities.ipynb)) and shows the correlation between those variables and the case/death rate through the viewpoints of specific subject areas.
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from string import ascii_letters
import numpy as np
%matplotlib inline
%reload_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
The collection of variables is read
###Code
dfVar=pd.read_csv('../data/week3_variables_municipalities.csv')
dfPopDens=pd.read_csv('../data/week3_population_density_municipalities.csv')
dfComorbidities=pd.read_csv('../data/week3_comorbidities_municipalities.csv')
dfVar.head()
dfPopDens.head()
dfComorbidities.head()
###Output
_____no_output_____
###Markdown
All the variables are merged into one dataframe
###Code
dfAll = pd.merge(dfVar,dfPopDens,on=['cve_ent'])
dfAll = pd.merge(dfAll,dfComorbidities,on=['cve_ent'])
###Output
_____no_output_____
###Markdown
Heatmap of health variables correlation with case/death rate All the variables related to health and the case/death rate are selected
###Code
dfHealth = dfAll[['case_rate', 'case_rate_last_60_days',
'death_rate', 'death_rate_last_60_days',
'pct_disability', 'pct_limitation', 'pct_mental_problem',
'pct_no_problems', 'pct_pop_obesity', 'pct_pop_hypertension',
'pct_pop_diabetes']].copy()
###Output
_____no_output_____
###Markdown
The correlation between the health and the case/death rate is as following
###Code
corr = dfHealth.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
heatmap=sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
heatmap = heatmap.get_figure()
###Output
_____no_output_____
###Markdown
As mention on the state health correlation people with mental problems, no health problems, comorbidities mostly obesity have a higher probability of contracting the virus and dying from it, the use of the municipality data reaffirms this because there is more data to correlate and the correlation is still positive. Heatmap of Socioeconomic and Education variables correlation with case/death rate All the variables related to socioeconomic and education factors and the case/death rate are selected
###Code
dfSocioeconomicEducation = dfAll[['case_rate', 'case_rate_last_60_days',
'death_rate', 'death_rate_last_60_days','pct_no_med_insurance',
'pct_med_insurance','average_years_finish','average_household_size',
'pct_household_tics','pct_household_no_tics']].copy()
###Output
_____no_output_____
###Markdown
The correlation between the socioeconomic and education factors and the case/death rate is as following
###Code
corr = dfSocioeconomicEducation.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
heatmap=sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
heatmap = heatmap.get_figure()
###Output
_____no_output_____
###Markdown
The same behaviour occurs in the municipality level as the state level where the people with medical insurance, househouseholds with TIC's and high level of education which are all on urban areas which are highly transited meaning they have a high probability of infection and in the case of people with no medical insurance and households wit no TIC's is viceversa because they are located on urban areas which have a low transit meaning they have a low probability of infection. Heatmap of population variables correlation with case/death rate All the variables related to population factors and the case/death rate are selected
###Code
dfPopulation = dfAll[['case_rate','case_rate_last_60_days',
'death_rate', 'death_rate_last_60_days',
'population/sqkm']].copy()
###Output
_____no_output_____
###Markdown
The correlation between the population factors and the case/death rate is as following
###Code
corr = dfPopulation.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
heatmap=sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
heatmap = heatmap.get_figure()
###Output
_____no_output_____
###Markdown
In this case the mobility was not one of the variables because there was not a data source that provided this variables, but even tough the population density also corroborated that the state analyzes is correct mening that where is a high poulation density area there is a high probability of getting infected and dying on it due the high transit. The variables and the region codes are selected of the dataframe for future storage
###Code
dfFinal = dfAll[['cve_ent', 'case_rate', 'case_rate_last_60_days',
'death_rate', 'death_rate_last_60_days',
'pct_disability', 'pct_limitation', 'pct_mental_problem',
'pct_no_problems', 'pct_no_med_insurance','pct_med_insurance',
'pct_pop_obesity', 'pct_pop_hypertension',
'pct_pop_diabetes',
'average_years_finish','average_household_size', 'pct_household_tics','pct_household_no_tics',
'population/sqkm']].copy()
###Output
_____no_output_____
###Markdown
The dataframe is stored
###Code
dfFinal.to_csv('../data/week3analyzesMunicipalities.csv',index=False)
###Output
_____no_output_____ |
notebooks/bigquery:nvdb.standardized.vegobjekter_tunnellop.ipynb | ###Markdown
Denne spørringen henter ut ID, navn, kortform på tilhørende vegsystem, åpningsår, rehabiliteringsår og geometri for tunnelløp rehabilitert siden 2020, sortert på åpningsår.
###Code
query = f"""
SELECT
id,
egenskaper.navn,
ARRAY(SELECT kortform FROM UNNEST(lokasjon.vegsystemreferanser)) kortform,
egenskaper.aapningsaar,
egenskaper.rehabiliteringsaar,
lokasjon.geometri
FROM
`{project}.standardized.vegobjekter_tunnellop`
WHERE
metadata.sluttdato IS NULL
AND egenskaper.rehabiliteringsaar >= 2020
ORDER BY aapningsaar
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____
###Markdown
Denne spørringen henter ID, kortform, underlagte tunnelløp, startdato, navn og lengde for aktive undersjøiske tunnelløp, sortert på navn.
###Code
query = f"""
SELECT
id,
(SELECT kortform FROM UNNEST(lokasjon.vegsystemreferanser)) kortform,
(SELECT vegobjekter FROM UNNEST(relasjoner.barn) WHERE type.id = 67) tunnellop,
metadata.startdato,
egenskaper.navn,
egenskaper.lengde_offisiell
FROM
`{project}.standardized.vegobjekter_tunnel`
WHERE
metadata.sluttdato IS NULL
AND egenskaper.undersjoisk = 'Ja'
ORDER BY
navn
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____ |
generating_homophilic_networks.ipynb | ###Markdown
Generating Networks with Homophily and Preferential AttachmentAuthor: [Fariba Karimi](https://www.gesis.org/person/fariba.karimi)Please cite as: Karimi, Fariba (2019). Generating Networks with Homophily and Preferential Attachment. url:[github.com/gesiscss/HomophilyNtwNotebooks](https://github.com/gesiscss/HomophilyNtwNotebooks) SignificanceThe homophilic networks with preferential attachment algorithm generates networks with tunable homophily, group sizes, and preferential attachment. This network model is from a family of growing network models where in each time step one node arrives and attaches to the existing nodes based on the homophily and preferential attachment. The preferential attachment, or rich-get-richer, mechanism resembles the Matthew Effect. Homophily resembles nodes' tendency to attract others with specific intrinsic attributes. For example, in the context of sexual relationship, homophily among people of the same gender is low and in the context of the friendship the homophily among people of the same gender is high. The generative model is minimal and can be applied to explain inequalities that are observed in complex systems. For more information, please visit the following paper IntroductionHomophily can put minority groups at a disadvantage by restricting their ability to establish links with a majority group or to access novel information. In our paper ([Karimi et al., 2018](https://doi.org/10.1038/s41598-018-29405-7)), we show how this phenomenon can influence the ranking of minorities in examples of real-world networks with various levels of heterophily and homophily ranging from sexual contacts, dating contacts, scientific collaborations, and scientific citations. We devise a social network model with tunable homophily and group sizes, and demonstrate how the degree ranking of nodes from the minority group in a network is a function of (i) relative group sizes and (ii) the presence or absence of homophilic behaviour. We provide analytical insights on how the ranking of the minority can be improved to ensure the representativeness of the group and correct for potential biases. Our work presents a foundation for assessing the impact of homophilic and heterophilic behaviour on minorities in social networks.In this study, we focus on two main mechanisms for the formation of ties: homophily ([McPherson et al., 2001](https://doi.org/10.1146/annurev.soc.27.1.415)) and preferential attachment ([Barabási & Albert, 1999](https://doi.org/10.1126/science.286.5439.509)), and systematically study how relative size differences between groups in social networks, with various levels of homophily, impact the ranking of nodes in synthetic and real-world networks. We build on previous models by systematically exploring the parameter range for homophily and group size differences and offer analytical and empirical evidence on the emergent properties of networks and the ranking of groups.Network generators consist of simple mechanisms or rules on how nodes or links are to be attached to an initial network. One of the most basic mechanisms involves the *rich-get-richer effect* according to which a node's chance to acquire a new link is proportional to the number of links that node already has. 
Such an effect was described by [Merton (1968)](https://doi.org/10.1126/science.159.3810.56) for the accumulation of reward in the science system and termed the *Matthew Effect*.[Barabási & Albert (1999)](https://doi.org/10.1126/science.286.5439.509) discovered power-law effects in complex social and technological networks. They introduced a model that formally combines network *growth* and *preferential attachment*, the latter being their term for the rich-get-richer mechanism, to generate undirected random networks with a power-law degree distribution. The so-called BA Model remains one of the most fundamental network generators to date.One fundamental aspect of many social networks is that they are comprised of individuals with a variety of attributes, such as race, age, educational background, or gender. Commonly, these attributes are distributed unequally in the population. For example, in many schools across the United States and Europe, Asian or Black students form a minority ([Moody, 2001](https://doi.org/10.1086/338954)). Similarly, women are usually a minority in science and engineering ([Jadidi et al., 2018](https://doi.org/10.1142/S0219525917500114)). Additionally, *homophily*, the tendency to associate with similar others, is observed in many social networks, ranging from friendship to marriage to business partnerships. One study has shown that in school friendships, Asians and Blacks are biased towards interacting with their own race at a rate $>7$ times higher than Whites and that homophily has a nonlinear relationship with respect to relative group sizes ([Currarini et al., 2010](https://doi.org/10.1073/pnas.0911793107)).**In this notebook**, we provide an interactive generator of the homophilic-preferential attachment network model that lets the user explore the effect of parameter settings. In this model, nodes are assigned a binary attribute that resembles their group membership. These attributes can be distributed equally or unequally in the network. The probability of a new arrival node $j$ to attach to an existing node $i$ is based on the degree of the existing node $k_i$ (preferential attachment) and the homophily between $i$ and $j$, $h_{ij}$:$$ p_{ij}=\frac{h_{ij} k_i}{\sum_i{h_{ij} k_i}} $$In addition, we let the user explore how homophily affects degree growth of minorities and majorities. Dependencies and Settings
###Code
import ipywidgets
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import random as rd
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Model ``homophilic_ba_graph(N, m, minority_fraction, homophily)`` is the main function to generate the network. This function can be called from ``generate_homophilic_graph_symmetric.py``. ``N`` sets the size of the desired network (number of nodes). ``m`` is the number of nodes an arriving note connects to. It has been proved mathamatically that $m$ does not affect the structural properties of the network. ``minority_fraction`` is the minortiy fraction that can range from 0 to 0.5. ``homophily`` is the parameter that determines the tendency between two nodes to be connected. The homophily parameter ranges from 0 to 1.
###Code
from generate_homophilic_graph_symmetric import homophilic_ba_graph
###Output
_____no_output_____
###Markdown
Explore the modelAdjust the sliders to see the effect of parameter changes.
###Code
def network(N, m, minority_fraction, homophily):
G = homophilic_ba_graph(N = N, m = m, minority_fraction = minority_fraction, homophily = homophily)
color = nx.get_node_attributes(G, 'color')
d = dict(G.degree)
pos = nx.spring_layout(G, k = 0.25) # k: optimal distance between nodes
plt.figure(figsize=(6, 6))
nx.draw_networkx_edges(G, pos = pos, alpha = 0.4)
nx.draw_networkx_nodes(G, pos = pos, nodelist = color.keys(),
node_size = [v * 15 for v in d.values()],
node_color = list(color.values()),
cmap=plt.cm.Reds_r)
plt.axis('off')
plt.show()
ipywidgets.interact(network,
N=ipywidgets.IntSlider(value=100, min=10, max=200, step=10),
m=ipywidgets.IntSlider(value=2, min=1, max=5, step=1),
minority_fraction=ipywidgets.FloatSlider(value=0.2, min=0., max=0.5, step=0.1),
homophily=ipywidgets.FloatSlider(value=0.5, min=0., max=1., step=0.1))
###Output
_____no_output_____
###Markdown
Degree dynamics The degree growth for the minority group $a$ as well as the majority group $b$ changes over time as$$ k(t) \propto t^{\beta}. $$$\beta$ is the exponent of the degree growth and has the following relation with homophily $h$ and group size $f$:$$ \beta_a = \frac{f_ah}{hC + (1-h)(2-C)} + \frac{f_b(1-h)}{(1-h)C + h(2-C)} $$$C$ is a multinomial function with 3 roots where only one root is accepted. The ``analytical_prediction()`` function below calculates the values of $\beta_a$ and $\beta_b$ analytically.
###Code
def analytical_prediction(homophily, minority_fraction):
# group a is the minority
pa = minority_fraction
pb = 1 - pa
daa = homophily
dab = 1 - daa
A = -1
B = 2 + 2*pa*daa - pa*dab - dab*pb + 2*dab
C = -4*pa*daa + 2*pa*dab - 4*dab - 2*daa*dab*pa + 2*(dab**2)*pb
D = +4*daa*dab*pa
p = [A, B, C, D]
ca = np.roots(p)[1] #only one root is acceptable
beta_a = (daa*pa*(2-ca) + ca *dab*pb) / (ca*(2-ca))
beta_b = 1 - (pb*(1-beta_a))/(2 - 2*beta_a-pa)
return beta_a, beta_b
###Output
_____no_output_____
###Markdown
Comparison of numerical and analytical analysis Now we compare degree growth of the generative model (colored dots) to the analytical solution (lines) for a homophily value $h$ that can be specified. $N=5000$, $m=2$, and $f_a=0.2$ are fixed. Because it takes rather long to generate networks with 5000 nodes, growth data is loaded from a file.
###Code
def degree_growth(homophily):
N = 5000
minority_fraction = 0.2
minority_file = open('degree_growth/minority_deg_f_0.2_sim_'+str(homophily)+'.txt', 'r')
majority_file = open('degree_growth/majority_deg_f_0.2_sim_'+str(homophily)+'.txt', 'r')
x = []
y_min = []
for line in minority_file:
t, avg_deg = line.split(',')
y_min.append(float(avg_deg))
x.append(int(t))
plt.scatter(x, y_min, color = 'r', alpha = 0.4)
y_maj = []
for line in majority_file:
t, avg_deg = line.split(',')
y_maj.append(float(avg_deg))
plt.scatter(x, y_maj, color = 'b', alpha = 0.4)
##### prediction #####
beta_min, beta_maj = analytical_prediction(homophily, minority_fraction)
x = np.arange(1, N)
offset_min = 10**(-beta_min)
predicted_value = (x**beta_min) * offset_min
plt.plot(x, predicted_value, label=r'analytical $\beta_a$ = %.2f'%beta_min, linestyle='-', color='black')
offset_maj = 10**(-beta_maj)
predicted_value = (x**beta_maj) * offset_maj
plt.plot(x, predicted_value, label=r'analytical $\beta_b$ = %.2f'%beta_maj, linestyle='--', color='black')
#plt.xlim(10, 100)
#plt.ylim(1, 10)
#plt.yscale('log')
#plt.xscale('log')
plt.title('N = 5000; m = 2; minority_fraction = %s'%minority_fraction)
plt.ylabel(r"$k(t)$")
plt.xlabel(r"$t$")
plt.legend(loc='upper left')
plt.show()
ipywidgets.interact(degree_growth,
homophily=ipywidgets.SelectionSlider(value=0.8, options=[0., 0.2, 0.5, 0.8, 1.]))
###Output
_____no_output_____ |
DAY 401 ~ 500/DAY447_[BaekJoon] 그대로 출력하기 2 (Python).ipynb | ###Markdown
2021년 8월 8일 일요일 BaekJoon - 그대로 출력하기 2 (Python) 문제 : https://www.acmicpc.net/problem/11719 블로그 : https://somjang.tistory.com/entry/BaekJoon-11719%EB%B2%88-%EA%B7%B8%EB%8C%80%EB%A1%9C-%EC%B6%9C%EB%A0%A5%ED%95%98%EA%B8%B0-2-Python Solution
###Code
def just_print():
while True:
try:
string2 = input()
print(string2)
except:
break
if __name__ == "__main__":
just_print()
###Output
_____no_output_____ |
api-book/_build/jupyter_execute/chapter-3-python/data-types.ipynb | ###Markdown
Data types Strong and dynamic typingIn programming, a **strong typed** language is one that guarantees that the types of all variables are known at compile time. This means that the compiler can check the types of all variables at compile time and will not allow the program to run if the types are incorrect.A **dynamic typed** language is one that allows the types of variables to be checked at run time. This means that the compiler can not check the types of all variables at compile time, but the program will run if the types are correct.Python is a dynamic typed language. For example Python allows one to add an integer and a floating point number, but adding an integer to a string produces an error. The gain in flexibility of compiling everything at runtime add big flexibility but hinders performance in some cases.In order to reduce the number of errors and make your code as bulletproof as possible, it is essential to understand data types and use them them correctly. Data types in PythonA data type or simply type is an attribute of data which tells the compiler or interpreter how the programmer intends to use the data {cite}`wiki:data_type`. The most common data types in Python are: * Integer (int)* Floating-point number (float)* String (str)* Boolean (bool)* DateTimeTo check for a specific type of data in Python, one can use the built in function **type**.
###Code
# Importing the datetime library which holds the datetime type
import datetime
# Defining the variables
a = 1.0
b = "1"
c = 1
d = True
e = datetime.datetime.now()
# Checking the types
print(f"Type of {a} is: {type(a)}")
print(f"Type of {b} is: {type(b)}")
print(f"Type of {c} is: {type(c)}")
print(f"Type of {d} is: {type(d)}")
print(f"Type of {e} is: {type(e)}")
###Output
Type of 1.0 is: <class 'float'>
Type of 1 is: <class 'str'>
Type of 1 is: <class 'int'>
Type of True is: <class 'bool'>
Type of 2022-01-16 10:20:12.405450 is: <class 'datetime.datetime'>
###Markdown
Each data type takes up different space in computer memory.
###Code
# Importing the needed package
import sys
# Checking the size of objects
print(f"Size of the float object: {sys.getsizeof(a)} bytes")
print(f"Size of the str object: {sys.getsizeof(b)} bytes")
print(f"Size of the int object: {sys.getsizeof(c)} bytes")
print(f"Size of the boolean object: {sys.getsizeof(d)} bytes")
print(f"Size of the boolean object: {sys.getsizeof(e)} bytes")
###Output
Size of the float object: 24 bytes
Size of the str object: 50 bytes
Size of the int object: 28 bytes
Size of the boolean object: 28 bytes
Size of the boolean object: 48 bytes
###Markdown
Functionalities of various data types Every Python data type has its own attributes and methods. You can read all of them following the official Python documentation: https://docs.python.org/3/library/datatypes.html String data type String data type is probably the most popular data type in terms of methods used. To read the full list of string methods available: https://docs.python.org/3/library/stdtypes.htmlstrSome examples:
###Code
# Defining a string
string = "hello world"
print(f"Original string: {string}")
# Capitalizing the string
print(f"Capitalized string: {string.capitalize()}")
# All calps
print(f"All caps string: {string.upper()}")
# Checking if the string ends with a specific character
print(f"Does the string end with 'rld'?: {string.endswith('rld')}")
# Checking if the string starts with a specific character
print(f"Does the string starts with 'hell'?: {string.startswith('hell')}")
# Spliting the string into substrings; If no splitting char is defined, it will split by whitespace
print(f"Spliting the string into a list: {string.split()}")
###Output
Original string: hello world
Capitalized string: Hello world
All caps string: HELLO WORLD
Does the string end with 'rld'?: True
Does the string starts with 'hell'?: True
Spliting the string into a list: ['hello', 'world']
###Markdown
Datetime data type To read the full list of available datetime methods and other documentation visit:https://docs.python.org/3/library/datetime.htmlA datetime object is a single object containing all the information from a date object and a time object.Like a date object, datetime assumes the current Gregorian calendar extended in both directions; like a time object, datetime assumes there are exactly 3600*24 seconds in every day.Some examples:
###Code
# Creating a datetime object
dt = datetime.datetime.now()
print(f"The created datetime object: {dt}")
# Getting the year from the datetime object
print(f"The year from the datetime object: {dt.year}")
# Getting the month from the datetime object
print(f"The month from the datetime object: {dt.month}")
# Getting the day from the datetime object
print(f"The day from the datetime object: {dt.day}")
# Extracting the date from the datetime object
print(f"The date part: {dt.date()}")
# Converting to string (year - month - day hour:minute)
print(f"The datetime object as a string: {dt.strftime('%Y-%m-%d %H:%M')}")
###Output
The created datetime object: 2022-01-16 10:20:12.417181
The year from the datetime object: 2022
The month from the datetime object: 1
The day from the datetime object: 16
The date part: 2022-01-16
The datetime object as a string: 2022-01-16 10:20
###Markdown
Float data typeTo read the full list of available float methods and other documentation visit: https://www.geeksforgeeks.org/python-float-type-and-its-methods/Some examples:
###Code
# Defining the float data type
float_number = 67.5
print(f"The float number: {float_number}")
# Is it an integer?
print(f"Is the float number an integer? (no decimal part): {float_number.is_integer()}")
# Spliting the float into a ratio of two numbers
print(f"Two integers whose ratio produces the original float number: {float_number.as_integer_ratio()}")
# Hexadeciaml representation of the float number
print(f"Hexadecimal representation of the float number: {float_number.hex()}")
###Output
The float number: 67.5
Is the float number an integer? (no decimal part): False
Two integers whose ratio produces the original float number: (135, 2)
Hexadecimal representation of the float number: 0x1.0e00000000000p+6
|
Proyecto_02_Sarmiento_25_07 (1).ipynb | ###Markdown
Proyecto 02 Consigna En este proyecto profundizarás lo desarrollado en el proyecto 01 (“Primer modelo de Machine Learning”). El objetivo es aplicar las técnicas incorporadas (Transformación de Datos, Optimización de Hiperparámetros, Modelos Avanzados, etc.) para generar un modelo que tenga un mejor desempeño que el modelo generado en el proyecto anterior. Luego, interpreta ese modelo para responder la siguiente pregunta: ¿qué podemos aprender de nuestro problema estudiando el modelo que generamos? Importamos librerías
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
PARTE A Elige cuáles de las siguientes tareas son apropiadas para su dataset. Implementa las transformaciones que elegiste. Es importante que justifiques por qué las haces:1. Detección y eliminación de Outliers2. Encoding3. Imputación de valores faltantes4. Escalado de datos5. Generación de nuevas variables predictoras/reducción de dimensionalidad (SVD/PCA).Vuelve a entrenar el modelo implementado en la Entrega 01 - en particular, el árbol de decisión - con este nuevo dataset transformado. Evalúa su desempeño a partir del dataset obtenido luego de transformar los datos. ¿Hay una mejora en su desempeño? Compara con el desempeño obtenido en el proyecto 01. Sea cual sea la respuesta, intenta explicar a qué se debe. Cargando el DataSet
###Code
df = pd.read_csv("DS_Proyecto_01_Datos_Properati.csv")
df.head()
df.shape
df.columns
df['operation_type'].unique()
df['currency'].unique()
###Output
_____no_output_____
###Markdown
Eliminando Columnas Dado que no nos interesan las fechas, ni los títulos o descripción, y como se trata de solo propiedades en venta valuadas en USD y ubicadas en Argentina, podemos deshacernos de estas columnas
###Code
df = df.drop(columns=['l1','start_date','end_date','created_on','currency','title','description','operation_type'], axis=1)
df.head()
###Output
_____no_output_____
###Markdown
Filtrando el DataSet Para poder comparar los modelos vamos a trabajar con un dataset filtrado como el del proyecto anterior
###Code
mask=df['l2']=='Capital Federal'
capital= df[mask]
capital.l2.unique()
capital.property_type.unique()
mask1=(df['property_type']=='Departamento')|(df['property_type']=='Casa')|(df['property_type']=='PH')
capital=capital[mask1]
capital.head()
###Output
_____no_output_____
###Markdown
Tratamiento de Outliers Por la experiencia en el Primer Proyecto, sabemos que contamos con una gran cantidad de outliers que dificultan el procesamiento de los datos y la interpretación de los mismos. Creo que es necesario identificarlos y excluirlos para proceder al análisis. Analicemos en primer lugar los valores extremos que puede haber en los precios y en las superficies. Esto nos va a ser útil para luego imputar usando medidas de tendencia central, como el promedio. Recordemos que las medidas de tendencia central pueden ser sensibles a outliers, por lo que la imputación de valores faltantes puede verse afectadaUna posible pregunta que surge es si conviene primero imputar faltantes y luego remover outliers, o lo contrario. Cuando se imputa se modifica la distribución de los datos. Por eso, las cotas que utilicemos para definir los outliers se verán afectadas por este cambio y modificarán los resultados finales. Entonces comenzaremos quitando outliers.Vamos a trabajar con price_usd_per_m2. Precios altos pueden corresponderse con grandes superficies, por lo que conviene analizar juntos ambos atributos.
###Code
capital.describe()
###Output
_____no_output_____
###Markdown
FUNCIÓN PARA FILTRAR TODAS LAS COLUMNAS POR RANGO INTERCUARTÍLICO
###Code
def get_iqr_values(df_in, col_name):
median = df_in[col_name].median()
q1 = df_in[col_name].quantile(0.25) # 25th percentile / 1st quartile
q3 = df_in[col_name].quantile(0.75) # 7th percentile / 3rd quartile
iqr = q3-q1 #Interquartile range
minimum = q1-1.5*iqr # The minimum value or the |- marker in the box plot
maximum = q3+1.5*iqr # The maximum value or the -| marker in the box plot
return median, q1, q3, iqr, minimum, maximum
def get_iqr_text(df_in, col_name):
median, q1, q3, iqr, minimum, maximum = get_iqr_values(df_in, col_name)
text = f"median={median:.2f}, q1={q1:.2f}, q3={q3:.2f}, iqr={iqr:.2f}, minimum={minimum:.2f}, maximum={maximum:.2f}"
return text
def remove_outliers(df_in, col_name):
_, _, _, _, minimum, maximum = get_iqr_values(df_in, col_name)
df_out_price = df_in.loc[(df_in[col_name] > minimum) & (df_in[col_name] < maximum)]
return df_out_price
def count_outliers(df_in, col_name):
_, _, _, _, minimum, maximum = get_iqr_values(df_in, col_name)
df_outliers = df_in.loc[(df_in[col_name] <= minimum) | (df_in[col_name] >= maximum)]
return df_outliers.shape[0]
def box_and_whisker(df_in, col_name):
title = get_iqr_text(df_in, col_name)
sns.boxplot(df_in[col_name])
plt.title(title)
plt.show()
###Output
_____no_output_____
###Markdown
Precio por Metro Cuadrado Dado que el precio es bastante sensible a la cantidad de metros cuadrados vamos a crear una columna de precio por metro cuadraro para detectar los outliers
###Code
capital['price_m2']=capital["price"]/capital["surface_total"]
ax = sns.distplot(capital['price_m2'])
plt.figure(figsize=(10,5))
sns.boxplot(x='price_m2', y='property_type',data=capital)
plt.title('Boxplot Precio m2')
plt.show()
filtrado = remove_outliers(capital, 'price_m2')
box_and_whisker(filtrado, 'price_m2')
plt.figure(figsize=(10,5))
sns.boxplot(x='price_m2', y='property_type',data=filtrado)
plt.title('Boxplot Precio m2')
plt.show()
###Output
_____no_output_____
###Markdown
Superficie Cubierta
###Code
# boxplot superficie cubierta
plt.figure(figsize=(10,5))
sns.boxplot(x='surface_covered', y='property_type',data=filtrado)
plt.title('Boxplot Superficie Cubierta')
plt.show()
filtrado1 = remove_outliers(filtrado, 'surface_covered')
box_and_whisker(filtrado1, 'surface_covered')
plt.figure(figsize=(10,5))
sns.boxplot(x='surface_covered', y='property_type',data=filtrado1)
plt.title('Boxplot Superficie Cubierta')
plt.show()
###Output
_____no_output_____
###Markdown
Habitaciones
###Code
plt.figure(figsize=(10,5))
sns.boxplot(x='rooms', y='property_type',data=filtrado1)
plt.title('Boxplot Habitaciones')
plt.show()
filtrado2 = remove_outliers(filtrado1, 'rooms')
box_and_whisker(filtrado2, 'rooms')
plt.figure(figsize=(10,5))
sns.boxplot(x='rooms', y='property_type',data=filtrado2)
plt.title('Boxplot Habitaciones')
plt.show()
###Output
_____no_output_____
###Markdown
Baños
###Code
# boxplot superficie cubierta
plt.figure(figsize=(10,5))
sns.boxplot(x='bathrooms', y='property_type',data=filtrado2)
plt.title('Boxplot Baños')
plt.show()
filtrado3 = remove_outliers(filtrado2, 'bathrooms')
box_and_whisker(filtrado3, 'bathrooms')
filtrado3.describe()
###Output
_____no_output_____
###Markdown
De esta manera hemos eliminado por rango intercuartílico los outliers. Podemos proceder a tratar los nulos y faltantes. Nulos y Faltantes
###Code
filtrado3.shape
df = filtrado3.copy()
print('PORCENTAJE DE NULOS')
for col in df.columns:
nulos = df[col].isna().sum()
print(col, "{:.2f}%".format(nulos*100/146660))
df.head()
df.shape
###Output
_____no_output_____
###Markdown
Re-Escalado de Datos Z score
###Code
from sklearn.preprocessing import StandardScaler
X = np.array(df['price_m2']).reshape(-1,1)
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
df['price_m2'] = X_scaled.reshape(1,-1)[0]
ax = sns.distplot(df['price_m2'])
X = np.array(df['surface_covered']).reshape(-1,1)
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
df['surface_covered'] = X_scaled.reshape(1,-1)[0]
ax = sns.distplot(df['surface_covered'])
df.head()
###Output
_____no_output_____
###Markdown
Encoding de variables Categóricas
###Code
df.property_type.unique()
dicc = {'PH': 0, 'Departamento': 1, 'Casa': 2}
###Output
_____no_output_____
###Markdown
One Hot Encoding
###Code
df=pd.get_dummies(df, columns=["property_type"])
#df = df.drop(columns=['l2',"surface_total","z score"], axis=1)
df.head()
###Output
_____no_output_____
###Markdown
Tal cual lo esperado, codificar las etiquetas de las propiedades agranda el dataset en cantidad de columnas. Evaluaré esto a la hora de implementar los modelos. En principio, antes de utilizar los modelos avanzados, utilizaré este dataset para comparar el desempeño del modelo 01. COMPARACION CON EL MODELO DEL PRIMER PROYECTO Modelo 02
###Code
df.head(2)
X= df[['bathrooms', 'surface_covered', 'property_type_Casa', 'property_type_Departamento','property_type_PH']]
y = df.price
#Diseño el modelo de train y test 02
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
#importo librerias
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
#Defino los modelos de ML con los mismos hiperparámetros que en los modelos 01
modelo_lineal2 = LinearRegression()
arbol_regresor2 = DecisionTreeRegressor(max_depth=8, random_state=42)
vecinos_regresor2 = KNeighborsRegressor(n_neighbors=20)
#Vuelvo a entrenar los modelos
modelo_lineal2.fit(X_train, y_train)
arbol_regresor2.fit(X_train, y_train)
vecinos_regresor2.fit(X_train, y_train)
#Vuelvo a graficar las métricas de los modelos
from sklearn.metrics import mean_squared_error
import seaborn as sns
modelos = ['Regresión lineal', 'Árbol de Decisión', 'Vecinos más cercanos']
for i, model in enumerate([modelo_lineal2, arbol_regresor2, vecinos_regresor2]):
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print(f'Modelo: {modelos[i]}')
rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))
print(f'Raíz del error cuadrático medio en Train: {rmse_train}')
print(f'Raíz del error cuadrático medio en Test: {rmse_test}')
plt.figure(figsize = (8,4))
plt.subplot(1,2,1)
sns.distplot(y_train - y_train_pred, bins = 20, label = 'train')
sns.distplot(y_test - y_test_pred, bins = 20, label = 'test')
plt.xlabel('errores')
plt.legend()
ax = plt.subplot(1,2,2)
ax.scatter(y_test,y_test_pred, s =2)
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes]
]
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
plt.xlabel('y (test)')
plt.ylabel('y_pred (test)')
plt.tight_layout()
plt.show()
###Output
Modelo: Regresión lineal
Raíz del error cuadrático medio en Train: 59762.43553497708
Raíz del error cuadrático medio en Test: 60166.061653880184
###Markdown
En el modelo del primer proyecto el modelo de ML, que menos error tenía era el de vecinos más cercanos. Este tenía una raíz del error cuadrático medio en Test: 228.139,04.En este segundo modelo, aplicando las técnicas de preprocesamiento de datos, hemos tenido una mejora imperesionante, especialmente en el Modelo de Árbol de Decisión con las siguientes métricas:Raíz del error cuadrático medio en Train: 56.558,06Raíz del error cuadrático medio en Test: 57.127,31. Como vemos no existe una gran varianza entre train y test. PARTE B - Modelos AvanzadosElige dos de los modelos avanzados vistos Compara con el desempeño obtenido en el proyecto 01 (en el caso de regresión, considera una regresión lineal con atributos polinómicos y regularización). 1. Entrénalos y evalúalos con sus argumentos por defecto. 2. No te olvides de hacer un train/test split y usar Validación Cruzada. 3. Optimiza sus hiperparámetros mediante Validación Cruzada y Grid Search o Random Search.Compara el desempeño de los nuevos modelos entre sí y con el modelo de la Parte A. ¿Cuál elegirías? Justifica. Random Forest Importando Librerías
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import accuracy_score
import sklearn.metrics as metrics
from sklearn.model_selection import learning_curve
# Tratamiento de datos
# ==============================================================================
import numpy as np
import pandas as pd
# Gráficos
# ==============================================================================
import matplotlib.pyplot as plt
# Preprocesado y modelado
# ==============================================================================
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.inspection import permutation_importance
import multiprocessing
# Configuración warnings
# ==============================================================================
import warnings
warnings.filterwarnings('once')
###Output
_____no_output_____
###Markdown
Limpiando el Dataframe
###Code
#elimino capital federal y barrio
nuevo = df.drop(columns=['l2',"l3"], axis=1)
nuevo = nuevo.reset_index()
nuevo = nuevo.drop(columns=['index'], axis=1)
nuevo.columns
###Output
_____no_output_____
###Markdown
Train-Test Split
###Code
# Dividimos los datos en entrenamiento y prueba
from sklearn.model_selection import train_test_split
# X son nuestras variables independientes
X = nuevo.drop(["price","surface_total","lat","lon"],axis = 1)
# y es nuestra variable dependiente
y = nuevo.price
# División 75% de datos para entrenamiento, 25% de daatos para test
X_train, X_test, y_train, y_test = train_test_split(X, y,random_state=0)
###Output
_____no_output_____
###Markdown
Nos aseguramos de que ambos conjuntos (train y test) tengan una distribución similar
###Code
print('Proporcion de etiquetas positiva en los datos de Train: ', y_train.sum()/y_train.size)
print('Proporcion de etiquetas positiva en los datos de Test: ', y_test.sum()/y_test.size)
###Output
Proporcion de etiquetas positiva en los datos de Train: 186543.63477402803
Proporcion de etiquetas positiva en los datos de Test: 186004.30509177028
###Markdown
Creación del Modelo
###Code
# Creaamos el modelo de Bosques Aleatorios (y configuramos el número de estimadores (árboles de decisión))
from sklearn.ensemble import RandomForestRegressor
BA_model = RandomForestRegressor(n_estimators = 10,
random_state = 2016,
min_samples_leaf = 8,)
###Output
_____no_output_____
###Markdown
Entrenar el Modelo
###Code
BA_model.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predecir sobre el conjunto de entrenamiento y sobre el de prueba
###Code
# Predecimos sobre nuestro set de entrenamieto
y_train_pred = BA_model.predict(X_train)
# Predecimos sobre nuestro set de test
y_test_pred = BA_model.predict(X_test)
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Evaluamos con una métrica apropiada para un problema de Regresión
###Code
rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))
print(f'Raíz del error cuadrático medio en Train: {rmse_train}')
print(f'Raíz del error cuadrático medio en Test: {rmse_test}')
###Output
Raíz del error cuadrático medio en Train: 23755.1563842911
Raíz del error cuadrático medio en Test: 26908.19851982414
###Markdown
Podemos ver que comparados con los modelos anteriores más simples hay una mejora muy significativa, en cuanto el error cuadrático medio disminuye a 23.755 unidades para Train y a 26.908 unidades para Test. Optimización de hiperparámetros El modelo inicial se ha entrenado utilizando 10 árboles (n_estimators=10) y manteniendo el resto de hiperparámetros con su valor por defecto. Al ser hiperparámetros, no se puede saber de antemano cuál es el valor más adecuado, la forma de identificarlos es mediante el uso de estrategias de validación, por ejemplo validación cruzada. Número de árboles En Random Forest, el número de árboles no es un hiperparámetro crítico en cuanto que, añadir árboles, solo puede hacer que mejorar el resultado. En Random Forest no se produce overfitting por exceso de árboles. Sin embargo, añadir árboles una vez que la mejora se estabiliza es una perdida te recursos computacionales.
###Code
# Validación empleando el Out-of-Bag error
# ==============================================================================
train_scores = []
oob_scores = []
# Valores evaluados
estimator_range = range(1, 150, 5)
# Bucle para entrenar un modelo con cada valor de n_estimators y extraer su error
# de entrenamiento y de Out-of-Bag.
for n_estimators in estimator_range:
modelo = RandomForestRegressor(
n_estimators = n_estimators,
criterion = 'mse',
max_depth = None,
max_features = 'auto',
oob_score = True,
n_jobs = -1,
random_state = 123
)
modelo.fit(X_train, y_train)
train_scores.append(modelo.score(X_train, y_train))
oob_scores.append(modelo.oob_score_)
# Gráfico con la evolución de los errores
fig, ax = plt.subplots(figsize=(6, 3.84))
ax.plot(estimator_range, train_scores, label="train scores")
ax.plot(estimator_range, oob_scores, label="out-of-bag scores")
ax.plot(estimator_range[np.argmax(oob_scores)], max(oob_scores),
marker='o', color = "red", label="max score")
ax.set_ylabel("R^2")
ax.set_xlabel("n_estimators")
ax.set_title("Evolución del out-of-bag-error vs número árboles")
plt.legend();
print(f"Valor óptimo de n_estimators: {estimator_range[np.argmax(oob_scores)]}")
# Validación empleando k-cross-validation y neg_root_mean_squared_error
# ==============================================================================
train_scores = []
cv_scores = []
# Valores evaluados
estimator_range = range(1, 150, 5)
# Bucle para entrenar un modelo con cada valor de n_estimators y extraer su error
# de entrenamiento y de k-cross-validation.
for n_estimators in estimator_range:
modelo = RandomForestRegressor(
n_estimators = n_estimators,
criterion = 'mse',
max_depth = None,
max_features = 'auto',
oob_score = False,
n_jobs = -1,
random_state = 123
)
# Error de train
modelo.fit(X_train, y_train)
predicciones = modelo.predict(X = X_train)
rmse = mean_squared_error(
y_true = y_train,
y_pred = predicciones,
squared = False
)
train_scores.append(rmse)
# Error de validación cruzada
scores = cross_val_score(
estimator = modelo,
X = X_train,
y = y_train,
scoring = 'neg_root_mean_squared_error',
cv = 5
)
# Se agregan los scores de cross_val_score() y se pasa a positivo
cv_scores.append(-1*scores.mean())
# Gráfico con la evolución de los errores
fig, ax = plt.subplots(figsize=(6, 3.84))
ax.plot(estimator_range, train_scores, label="train scores")
ax.plot(estimator_range, cv_scores, label="cv scores")
ax.plot(estimator_range[np.argmin(cv_scores)], min(cv_scores),
marker='o', color = "red", label="min score")
ax.set_ylabel("root_mean_squared_error")
ax.set_xlabel("n_estimators")
ax.set_title("Evolución del cv-error vs número árboles")
plt.legend();
print(f"Valor óptimo de n_estimators: {estimator_range[np.argmin(cv_scores)]}")
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Con ambas gráficas podemos ver que el root mean squared error no mejora significativamente luego de n_estimators= 40, produciendo un gasto significativo de recursos computacionales a partir de este valor, que no se se compensa con una reducción del error. Max features El valor de máx_features es uno de los hiperparámetros más importantes de random forest, ya que es el que permite controlar cuánto se decorrelacionan los árboles entre sí.
###Code
# Validación empleando el Out-of-Bag error
# ==============================================================================
train_scores = []
oob_scores = []
# Valores evaluados
max_features_range = range(1, X_train.shape[1] + 1, 1)
# Bucle para entrenar un modelo con cada valor de max_features y extraer su error
# de entrenamiento y de Out-of-Bag.
for max_features in max_features_range:
modelo = RandomForestRegressor(
n_estimators = 100,
criterion = 'mse',
max_depth = None,
max_features = max_features,
oob_score = True,
n_jobs = -1,
random_state = 123
)
modelo.fit(X_train, y_train)
train_scores.append(modelo.score(X_train, y_train))
oob_scores.append(modelo.oob_score_)
# Gráfico con la evolución de los errores
fig, ax = plt.subplots(figsize=(6, 3.84))
ax.plot(max_features_range, train_scores, label="train scores")
ax.plot(max_features_range, oob_scores, label="out-of-bag scores")
ax.plot(max_features_range[np.argmax(oob_scores)], max(oob_scores),
marker='o', color = "red")
ax.set_ylabel("R^2")
ax.set_xlabel("max_features")
ax.set_title("Evolución del out-of-bag-error vs número de predictores")
plt.legend();
print(f"Valor óptimo de max_features: {max_features_range[np.argmax(oob_scores)]}")
# Validación empleando k-cross-validation y neg_root_mean_squared_error
# ==============================================================================
train_scores = []
cv_scores = []
# Valores evaluados
max_features_range = range(1, X_train.shape[1] + 1, 1)
# Bucle para entrenar un modelo con cada valor de max_features y extraer su error
# de entrenamiento y de k-cross-validation.
for max_features in max_features_range:
modelo = RandomForestRegressor(
n_estimators = 100,
criterion = 'mse',
max_depth = None,
max_features = max_features,
oob_score = True,
n_jobs = -1,
random_state = 123
)
# Error de train
modelo.fit(X_train, y_train)
predicciones = modelo.predict(X = X_train)
rmse = mean_squared_error(
y_true = y_train,
y_pred = predicciones,
squared = False
)
train_scores.append(rmse)
# Error de validación cruzada
scores = cross_val_score(
estimator = modelo,
X = X_train,
y = y_train,
scoring = 'neg_root_mean_squared_error',
cv = 5
)
# Se agregan los scores de cross_val_score() y se pasa a positivo
cv_scores.append(-1*scores.mean())
# Gráfico con la evolución de los errores
fig, ax = plt.subplots(figsize=(6, 3.84))
ax.plot(max_features_range, train_scores, label="train scores")
ax.plot(max_features_range, cv_scores, label="cv scores")
ax.plot(max_features_range[np.argmin(cv_scores)], min(cv_scores),
marker='o', color = "red", label="min score")
ax.set_ylabel("root_mean_squared_error")
ax.set_xlabel("max_features")
ax.set_title("Evolución del cv-error vs número de predictores")
plt.legend();
print(f"Valor óptimo de max_features: {max_features_range[np.argmin(cv_scores)]}")
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Acorde a las dos métricas utilizadas, el valor óptimo de max_features está entre 5 y 6 Grid Search Aunque el análisis individual de los hiperparámetros es útil para entender su impacto en el modelo e identificar rangos de interés, la búsqueda final no debe hacerse de forma secuencial, ya que cada hiperparámetro interacciona con los demás. Es preferible recurrir a grid search o random search para analizar varias combinaciones de hiperparámetros.
###Code
# Grid de hiperparámetros evaluados
# ==============================================================================
param_grid = {'n_estimators': [150],
'max_features': [5, 7, 9],
'max_depth' : [None, 3, 10, 20]
}
# Búsqueda por grid search con validación cruzada
# ==============================================================================
grid = GridSearchCV(
estimator = RandomForestRegressor(random_state = 123),
param_grid = param_grid,
scoring = 'neg_root_mean_squared_error',
n_jobs = multiprocessing.cpu_count() - 1,
cv = RepeatedKFold(n_splits=5, n_repeats=3, random_state=123),
refit = True,
verbose = 0,
return_train_score = True
)
grid.fit(X = X_train, y = y_train)
# Resultados
# ==============================================================================
resultados = pd.DataFrame(grid.cv_results_)
resultados.filter(regex = '(param.*|mean_t|std_t)') \
.drop(columns = 'params') \
.sort_values('mean_test_score', ascending = False) \
.head(4)
# Mejores hiperparámetros por validación cruzada
# ==============================================================================
print("----------------------------------------")
print("Mejores hiperparámetros encontrados (cv)")
print("----------------------------------------")
print(grid.best_params_, ":", grid.best_score_, grid.scoring)
###Output
----------------------------------------
Mejores hiperparámetros encontrados (cv)
----------------------------------------
{'max_depth': 20, 'max_features': 5, 'n_estimators': 150} : -24316.192280863546 neg_root_mean_squared_error
###Markdown
Una vez identificados los mejores hiperparámetros, se reentrena el modelo indicando los valores óptimos en sus argumentos. Si en el GridSearchCV() se indica refit=True, este reentrenamiento se hace automáticamente y el modelo resultante se encuentra almacenado en .best_estimator_.
###Code
# Error de test del modelo final
# ==============================================================================
modelo_final_rf = grid.best_estimator_
predicciones = modelo.predict(X = X_test)
rmse = mean_squared_error(
y_true = y_test,
y_pred = predicciones,
squared = False
)
print(f"El error (rmse) de test es: {rmse}")
###Output
El error (rmse) de test es: 23815.370787939155
###Markdown
Tras optimizar los hiperparámetros, se consigue reducir el error rmse del modelo a 23.815. Las predicciones del modelo final se alejan en promedio 23.815 unidades (23.815 dólares) del valor real. Importancia de predictores¶ Importancia por pureza de nodos
###Code
importancia_predictores = pd.DataFrame(
{'predictor': X_train.columns,
'importancia': modelo_final_rf.feature_importances_}
)
print("Importancia de los predictores en el modelo")
print("-------------------------------------------")
importancia_predictores.sort_values('importancia', ascending=False)
###Output
Importancia de los predictores en el modelo
-------------------------------------------
###Markdown
Importancia por permutaciones
###Code
importancia = permutation_importance(
estimator = modelo_final_rf,
X = X_train,
y = y_train,
n_repeats = 5,
scoring = 'neg_root_mean_squared_error',
n_jobs = multiprocessing.cpu_count() - 1,
random_state = 123
)
# Se almacenan los resultados (media y desviación) en un dataframe
df_importancia = pd.DataFrame(
{k: importancia[k] for k in ['importances_mean', 'importances_std']}
)
df_importancia['feature'] = X_train.columns
df_importancia.sort_values('importances_mean', ascending=False)
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Gráfico
###Code
feat_importances = pd.Series(modelo_final_rf.feature_importances_, index=X.columns)
feat_importances.nlargest(12).plot(kind='bar')
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Según se observa, superficie cubierta, precio por metro cuadraro y baños son las features más importantes para este modelo a la hora de predecir el precio final de una vivienda. XGBOOST Librerias
###Code
#Tratamiento de datos
# ==============================================================================
import numpy as np
import pandas as pd
# Gráficos
# ==============================================================================
import matplotlib.pyplot as plt
# Preprocesado y modelado
# ==============================================================================
from sklearn.datasets import load_boston
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.inspection import permutation_importance
import multiprocessing
# Configuración warnings
# ==============================================================================
import warnings
warnings.filterwarnings('once')
# División de los datos en train y test
# ==============================================================================
# X son nuestras variables independientes
X = nuevo.drop(["price","surface_total","lat","lon"],axis = 1)
# y es nuestra variable dependiente
y = nuevo.price
# División 75% de datos para entrenamiento, 25% de daatos para test
X_train, X_test, y_train, y_test = train_test_split(X, y,random_state=0)
# Creación del modelo
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Creación del Modelo
###Code
from xgboost import XGBRegressor
###Output
_____no_output_____
###Markdown
Grid Search
###Code
# Grid de hiperparámetros evaluados
# ==============================================================================
param_grid = {'max_depth' : [None, 1, 3, 5, 10, 20],
'subsample' : [0.5, 1],
'learning_rate' : [0.001, 0.01, 0.1],
'booster' : ['gbtree']
}
# Crear conjunto de validación
# ==============================================================================
np.random.seed(123)
idx_validacion = np.random.choice(
X_train.shape[0],
size= int(X_train.shape[0]*0.1),
replace=False
)
X_val = X_train.iloc[idx_validacion, :].copy()
y_val = y_train.iloc[idx_validacion].copy()
X_train_grid = X_train.reset_index(drop = True).drop(idx_validacion, axis = 0).copy()
y_train_grid = y_train.reset_index(drop = True).drop(idx_validacion, axis = 0).copy()
# XGBoost necesita pasar los paramétros específicos del entrenamiento al llamar
# al método .fit()
fit_params = {"early_stopping_rounds" : 5,
"eval_metric" : "rmse",
"eval_set" : [(X_val, y_val)],
"verbose" : 0
}
# Búsqueda por grid search con validación cruzada
# ==============================================================================
grid = GridSearchCV(
estimator = XGBRegressor(
n_estimators = 1000,
random_state = 123
),
param_grid = param_grid,
scoring = 'neg_root_mean_squared_error',
n_jobs = multiprocessing.cpu_count() - 1,
cv = RepeatedKFold(n_splits=3, n_repeats=1, random_state=123),
refit = True,
verbose = 0,
return_train_score = True
)
grid.fit(X = X_train_grid, y = y_train_grid, **fit_params)
# Resultados
# ==============================================================================
resultados = pd.DataFrame(grid.cv_results_)
resultados.filter(regex = '(param.*|mean_t|std_t)') \
.drop(columns = 'params') \
.sort_values('mean_test_score', ascending = False) \
.head(4)
# Mejores hiperparámetros por validación cruzada
# ==============================================================================
print("----------------------------------------")
print("Mejores hiperparámetros encontrados (cv)")
print("----------------------------------------")
print(grid.best_params_, ":", grid.best_score_, grid.scoring)
# Número de árboles del modelo final (early stopping)
# ==============================================================================
n_arboles_incluidos = len(grid.best_estimator_.get_booster().get_dump())
print(f"Número de árboles incluidos en el modelo: {n_arboles_incluidos}")
# Error de test del modelo final
# ==============================================================================
modelo_final_xgb = grid.best_estimator_
predicciones = modelo_final_xgb.predict(X_test)
rmse = mean_squared_error(
y_true = y_test,
y_pred = predicciones,
squared = False
)
print(f"El error (rmse) de test es: {rmse}")
## Importance by node purity
importancia_predictores = pd.DataFrame(
{'predictor': X_train.columns,
'importancia': modelo_final_xgb.feature_importances_}
)
print("Importancia de los predictores en el modelo")
print("-------------------------------------------")
importancia_predictores.sort_values('importancia', ascending=False)
###Output
Importance of the predictors in the model
-------------------------------------------
###Markdown
Permutation Importance
###Code
importancia = permutation_importance(
estimator = modelo_final_xgb,
X = X_train,
y = y_train,
n_repeats = 5,
scoring = 'neg_root_mean_squared_error',
n_jobs = multiprocessing.cpu_count() - 1,
random_state = 123
)
# Store the results (mean and standard deviation) in a dataframe
df_importancia = pd.DataFrame(
{k: importancia[k] for k in ['importances_mean', 'importances_std']}
)
df_importancia['feature'] = X_train.columns
df_importancia.sort_values('importances_mean', ascending=False)
# Plot
feat_importances = pd.Series(modelo_final_xgb.feature_importances_, index=X.columns)
feat_importances.nlargest(12).plot(kind='bar')
###Output
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
As can be seen, covered surface area, price per square metre and number of bathrooms are the most important features for this model when it comes to predicting the final price of a home. After optimizing the hyperparameters, the model's rmse is reduced to 23,815. The final model's predictions deviate on average by 24,119 units (24,119 dollars) from the actual value.
###Code
# Plot the model metrics again
from sklearn.metrics import mean_squared_error
import seaborn as sns
modelos = ['Random Forest', 'XGBoost']
for i, model in enumerate([modelo_final_rf, modelo_final_xgb]):
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print(f'Model: {modelos[i]}')
rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))
print(f'Root mean squared error on Train: {rmse_train}')
print(f'Root mean squared error on Test: {rmse_test}')
plt.figure(figsize = (8,4))
plt.subplot(1,2,1)
sns.distplot(y_train - y_train_pred, bins = 20, label = 'train')
sns.distplot(y_test - y_test_pred, bins = 20, label = 'test')
plt.xlabel('errors')
plt.legend()
ax = plt.subplot(1,2,2)
ax.scatter(y_test,y_test_pred, s =2)
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
]
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
plt.xlabel('y (test)')
plt.ylabel('y_pred (test)')
plt.tight_layout()
plt.show()
###Output
Model: Random Forest
Root mean squared error on Train: 9720.859415366289
Root mean squared error on Test: 23412.038651602637
|
4.databases/answers/3.conll-to-xml.ipynb | ###Markdown
From one format to another You have a file [*ftb-dependencies.conll*](../data/ftb-dependencies.conll) in the [CoNLL-U](https://universaldependencies.org/format.html) format, taken from the [*French Treebank*](http://ftb.linguist.univ-paris-diderot.fr/). Your goal is to convert this file to XML, keeping only some of the information:- the word (2nd column);- its lemma (3rd column);- its part-of-speech tag (4th column).In the end, you should obtain the structure below:```xml<sents> <sent> <w lemma="le" pos="DET">Le</w> <w lemma="petit" pos="ADJ">petit</w> <w lemma="chat" pos="NC">chat</w> <w lemma="être" pos="V">est</w> <w lemma="mort" pos="ADJ">mort</w> <w lemma="." pos="PONCT">.</w> </sent> …</sents>```As always, there are several ways to proceed. We recommend that you first try to read the CoNLL file as if it were plain text, and parse it in order to collect a list of sentences which are themselves lists of tuples containing the word, its lemma and its part-of-speech tag:```pysentences = [ [ ('Le', 'le', 'DET'), ('petit', 'petit', 'ADJ'), ('chat', 'chat', 'NC'), ('est', 'être', 'V'), ('mort', 'mort', 'ADJ'), ('.', '.', 'PONCT') ] …]```To obtain the tuples, you just need to find the separator character between fields and then isolate the required information.To delimit the sentences, look for the symbol that marks a line break: that should be the signal to close the sentence under construction and start a new one.
###Code
# Your code here
sentences = list()
sentence = list()
with open('../data/ftb-dependencies.conll') as f:
for row in f:
# Separator: tabulation
data = row.split('\t')
# Blank row? Then append the sentence and create new one
if data[0] == '\n':
sentences.append(sentence)
sentence = list()
# Otherwise, insert the word in the current sentence
else:
sentence.append((data[1], data[2], data[3]))
###Output
_____no_output_____
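Before building the XML, a quick sanity check of the intermediate structure described above can be useful (a minimal sketch, assuming the `sentences` list built in the previous cell):

```python
# Inspect the parsed structure: a list of sentences,
# each a list of (word, lemma, pos) tuples
print(len(sentences))      # number of sentences collected
print(sentences[0][:6])    # first few tuples of the first sentence
```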
###Markdown
Then, in a second step, build the XML file!
###Code
# Your code here
import xml.etree.ElementTree as ET
sents = ET.Element('sents')
for sentence in sentences:
sent = ET.SubElement(sents, 'sent')
for word, lemma, pos in sentence:
w = ET.SubElement(sent, 'w')
w.text = word
w.set('lemma', lemma)
w.set('pos', pos)
tree = ET.ElementTree(sents)
tree.write('../data/ftb-corpus.xml')
###Output
_____no_output_____ |
labs/Module03/03-05-Transformations Eigenvectors and Eigenvalues.ipynb | ###Markdown
Transformations, Eigenvectors, and EigenvaluesMatrices and vectors are used together to manipulate spatial dimensions. This has a lot of applications, including the mathematical generation of 3D computer graphics, geometric modeling, and the training and optimization of machine learning algorithms. We're not going to cover the subject exhaustively here; but we'll focus on a few key concepts that are useful to know when you plan to work with machine learning. Linear TransformationsYou can manipulate a vector by multiplying it with a matrix. The matrix acts as a function that operates on an input vector to produce a vector output. Specifically, matrix multiplications of vectors are *linear transformations* that transform the input vector into the output vector.For example, consider this matrix ***A*** and vector ***v***:$$ A = \begin{bmatrix}2 & 3\\5 & 2\end{bmatrix} \;\;\;\; \vec{v} = \begin{bmatrix}1\\2\end{bmatrix}$$We can define a transformation ***T*** like this:$$ T(\vec{v}) = A\vec{v} $$To perform this transformation, we simply calculate the dot product by applying the *RC* rule; multiplying each row of the matrix by the single column of the vector:$$\begin{bmatrix}2 & 3\\5 & 2\end{bmatrix} \cdot \begin{bmatrix}1\\2\end{bmatrix} = \begin{bmatrix}8\\9\end{bmatrix}$$Here's the calculation in Python:
###Code
import numpy as np
v = np.array([1,2])
A = np.array([[2,3],
[5,2]])
t = A@v
print (t)
###Output
_____no_output_____
###Markdown
In this case, both the input vector and the output vector have 2 components - in other words, the transformation takes a 2-dimensional vector and produces a new 2-dimensional vector; which we can indicate like this:$$ T: \rm I\!R^{2} \to \rm I\!R^{2} $$Note that the output vector may have a different number of dimensions from the input vector; so the matrix function might transform the vector from one space to another - or in notation, $T: \rm I\!R^{n} \to \rm I\!R^{m}$.For example, let's redefine matrix ***A***, while retaining our original definition of vector ***v***:$$ A = \begin{bmatrix}2 & 3\\5 & 2\\1 & 1\end{bmatrix} \;\;\;\; \vec{v} = \begin{bmatrix}1\\2\end{bmatrix}$$Now if we once again define ***T*** like this:$$ T(\vec{v}) = A\vec{v} $$We apply the transformation like this:$$\begin{bmatrix}2 & 3\\5 & 2\\1 & 1\end{bmatrix} \cdot \begin{bmatrix}1\\2\end{bmatrix} = \begin{bmatrix}8\\9\\3\end{bmatrix}$$So now, our transformation transforms the vector from 2-dimensional space to 3-dimensional space:$$ T: \rm I\!R^{2} \to \rm I\!R^{3} $$Here it is in Python:
###Code
import numpy as np
v = np.array([1,2])
A = np.array([[2,3],
[5,2],
[1,1]])
t = A@v
print (t)
import numpy as np
v = np.array([1,2])
A = np.array([[1,2],
[2,1]])
t = A@v
print (t)
###Output
_____no_output_____
###Markdown
Transformations of Magnitude and AmplitudeWhen you multiply a vector by a matrix, you transform it in at least one of the following two ways:* Scale the length (*magnitude*) of the vector to make it longer or shorter* Change the direction (*amplitude*) of the vectorFor example, consider the following matrix and vector:$$ A = \begin{bmatrix}2 & 0\\0 & 2\end{bmatrix} \;\;\;\; \vec{v} = \begin{bmatrix}1\\0\end{bmatrix}$$As before, we transform the vector ***v*** by multiplying it with the matrix ***A***:\begin{equation}\begin{bmatrix}2 & 0\\0 & 2\end{bmatrix} \cdot \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}2\\0\end{bmatrix}\end{equation}In this case, the resulting vector has changed in length (*magnitude*), but has not changed its direction (*amplitude*).Let's visualize that in Python:
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
v = np.array([1,0])
A = np.array([[2,0],
[0,2]])
t = A@v
print (t)
# Plot v and t
vecs = np.array([t,v])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
The original vector ***v*** is shown in orange, and the transformed vector ***t*** is shown in blue - note that ***t*** has the same direction (*amplitude*) as ***v*** but a greater length (*magnitude*).Now let's use a different matrix to transform the vector ***v***:\begin{equation}\begin{bmatrix}0 & -1\\1 & 0\end{bmatrix} \cdot \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}0\\1\end{bmatrix}\end{equation}This time, the resulting vector has been changed to a different amplitude, but has the same magnitude.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
v = np.array([1,0])
A = np.array([[0,-1],
[1,0]])
t = A@v
print (t)
# Plot v and t
vecs = np.array([v,t])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['orange', 'blue'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
Now let's change the matrix one more time:\begin{equation}\begin{bmatrix}2 & 1\\1 & 2\end{bmatrix} \cdot \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}2\\1\end{bmatrix}\end{equation}Now our resulting vector has been transformed to a new amplitude *and* magnitude - the transformation has affected both direction and scale.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
v = np.array([1,0])
A = np.array([[2,1],
[1,2]])
t = A@v
print (t)
# Plot v and t
vecs = np.array([v,t])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['orange', 'blue'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
Affine TransformationsAn Affine transformation multiplies a vector by a matrix and adds an offset vector, sometimes referred to as *bias*; like this:$$T(\vec{v}) = A\vec{v} + \vec{b}$$For example:\begin{equation}\begin{bmatrix}5 & 2\\3 & 1\end{bmatrix} \cdot \begin{bmatrix}1\\1\end{bmatrix} + \begin{bmatrix}-2\\-6\end{bmatrix} = \begin{bmatrix}5\\-2\end{bmatrix}\end{equation}This kind of transformation is actually the basis of linear regression, which is a core foundation for machine learning. The matrix defines the *features*, the first vector is the *coefficients*, and the bias vector is the *intercept*.Here's an example of an Affine transformation in Python:
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
v = np.array([1,1])
A = np.array([[5,2],
[3,1]])
b = np.array([-2,-6])
t = A@v + b
print (t)
# Plot v and t
vecs = np.array([v,t])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['orange', 'blue'], scale=15)
plt.show()
###Output
_____no_output_____
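To make the linear-regression connection above concrete, here is a minimal sketch (the feature matrix, coefficients, and intercept below are made-up illustrative values, not fitted ones): every prediction is just an Affine transformation of a row of features.

```python
import numpy as np

# Hypothetical data: 3 observations with 2 features each
X = np.array([[1.0, 2.0],
              [2.0, 0.5],
              [3.0, 1.5]])
w = np.array([0.8, -0.3])   # coefficients
b = 1.5                     # intercept (bias)

# Linear-regression predictions are an Affine transformation: y_hat = Xw + b
y_hat = X @ w + b
print(y_hat)
```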
###Markdown
Eigenvectors and EigenvaluesSo we can see that when you transform a vector using a matrix, you change its direction, length, or both. When the transformation only affects scale (in other words, the output vector has a different magnitude but the same amplitude as the input vector), the matrix multiplication for the transformation is the equivalent operation as some scalar multiplication of the vector.For example, earlier we examined the following transformation that dot-multiplies a vector by a matrix:$$\begin{bmatrix}2 & 0\\0 & 2\end{bmatrix} \cdot \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}2\\0\end{bmatrix}$$You can achieve the same result by multiplying the vector by the scalar value ***2***:$$2 \times \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}2\\0\end{bmatrix}$$The following Python code performs both of these calculations and shows the results, which are identical.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
v = np.array([1,0])
A = np.array([[2,0],
[0,2]])
t1 = A@v
print (t1)
t2 = 2*v
print (t2)
fig = plt.figure()
a=fig.add_subplot(1,1,1)
# Plot v and t1
vecs = np.array([t1,v])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
a=fig.add_subplot(1,2,1)
# Plot v and t2
vecs = np.array([t2,v])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
In cases like these, where a matrix transformation is the equivalent of a scalar-vector multiplication, the scalar-vector pairs that correspond to the matrix are known respectively as eigenvalues and eigenvectors. We generally indicate eigenvalues using the Greek letter lambda (λ), and the formula that defines eigenvalues and eigenvectors with respect to a transformation is:$$ T(\vec{v}) = \lambda\vec{v}$$Where the vector ***v*** is an eigenvector and the value ***λ*** is an eigenvalue for transformation ***T***.When the transformation ***T*** is represented as a matrix multiplication, as in this case where the transformation is represented by matrix ***A***:$$ T(\vec{v}) = A\vec{v} = \lambda\vec{v}$$Then ***v*** is an eigenvector and ***λ*** is an eigenvalue of ***A***.A matrix can have multiple eigenvector-eigenvalue pairs, and you can calculate them manually. However, it's generally easier to use a tool or programming language. For example, in Python you can use the ***linalg.eig*** function, which returns an array of eigenvalues and a matrix of the corresponding eigenvectors for the specified matrix.Here's an example that returns the eigenvalue and eigenvector pairs for the following matrix:$$A=\begin{bmatrix}2 & 0\\0 & 3\end{bmatrix}$$
###Code
import numpy as np
A = np.array([[2,0],
[0,3]])
eVals, eVecs = np.linalg.eig(A)
print(eVals)
print(eVecs)
###Output
_____no_output_____
###Markdown
So there are two eigenvalue-eigenvector pairs for this matrix, as shown here:$$ \lambda_{1} = 2, \vec{v_{1}} = \begin{bmatrix}1 \\ 0\end{bmatrix} \;\;\;\;\;\; \lambda_{2} = 3, \vec{v_{2}} = \begin{bmatrix}0 \\ 1\end{bmatrix} $$Let's verify that multiplying each eigenvalue-eigenvector pair corresponds to the dot-product of the eigenvector and the matrix. Here's the first pair:$$ 2 \times \begin{bmatrix}1 \\ 0\end{bmatrix} = \begin{bmatrix}2 \\ 0\end{bmatrix} \;\;\;and\;\;\; \begin{bmatrix}2 & 0\\0 & 3\end{bmatrix} \cdot \begin{bmatrix}1 \\ 0\end{bmatrix} = \begin{bmatrix}2 \\ 0\end{bmatrix} $$So far so good. Now let's check the second pair:$$ 3 \times \begin{bmatrix}0 \\ 1\end{bmatrix} = \begin{bmatrix}0 \\ 3\end{bmatrix} \;\;\;and\;\;\; \begin{bmatrix}2 & 0\\0 & 3\end{bmatrix} \cdot \begin{bmatrix}0 \\ 1\end{bmatrix} = \begin{bmatrix}0 \\ 3\end{bmatrix} $$So our eigenvalue-eigenvector scalar multiplications do indeed correspond to our matrix-eigenvector dot-product transformations.Here's the equivalent code in Python, using the ***eVals*** and ***eVecs*** variables you generated in the previous code cell:
###Code
vec1 = eVecs[:,0]
lam1 = eVals[0]
print('Matrix A:')
print(A)
print('-------')
print('lam1: ' + str(lam1))
print ('v1: ' + str(vec1))
print ('Av1: ' + str(A@vec1))
print ('lam1 x v1: ' + str(lam1*vec1))
print('-------')
vec2 = eVecs[:,1]
lam2 = eVals[1]
print('lam2: ' + str(lam2))
print ('v2: ' + str(vec2))
print ('Av2: ' + str(A@vec2))
print ('lam2 x v2: ' + str(lam2*vec2))
###Output
_____no_output_____
###Markdown
You can use the following code to visualize these transformations:
###Code
t1 = lam1*vec1
print (t1)
t2 = lam2*vec2
print (t2)
fig = plt.figure()
a=fig.add_subplot(1,1,1)
# Plot v and t1
vecs = np.array([t1,vec1])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
a=fig.add_subplot(1,2,1)
# Plot v and t2
vecs = np.array([t2,vec2])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
Similarly, earlier we examined the following matrix transformation:$$\begin{bmatrix}2 & 0\\0 & 2\end{bmatrix} \cdot \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}2\\0\end{bmatrix}$$And we saw that you can achieve the same result by multiplying the vector by the scalar value ***2***:$$2 \times \begin{bmatrix}1\\0\end{bmatrix} = \begin{bmatrix}2\\0\end{bmatrix}$$This works because the scalar value 2 and the vector (1,0) are an eigenvalue-eigenvector pair for this matrix.Let's use Python to determine the eigenvalue-eigenvector pairs for this matrix:
###Code
import numpy as np
A = np.array([[2,0],
[0,2]])
eVals, eVecs = np.linalg.eig(A)
print(eVals)
print(eVecs)
###Output
_____no_output_____
###Markdown
So once again, there are two eigenvalue-eigenvector pairs for this matrix, as shown here:$$ \lambda_{1} = 2, \vec{v_{1}} = \begin{bmatrix}1 \\ 0\end{bmatrix} \;\;\;\;\;\; \lambda_{2} = 2, \vec{v_{2}} = \begin{bmatrix}0 \\ 1\end{bmatrix} $$Let's verify that multiplying each eigenvalue-eigenvector pair corresponds to the dot-product of the eigenvector and the matrix. Here's the first pair:$$ 2 \times \begin{bmatrix}1 \\ 0\end{bmatrix} = \begin{bmatrix}2 \\ 0\end{bmatrix} \;\;\;and\;\;\; \begin{bmatrix}2 & 0\\0 & 2\end{bmatrix} \cdot \begin{bmatrix}1 \\ 0\end{bmatrix} = \begin{bmatrix}2 \\ 0\end{bmatrix} $$Well, we already knew that. Now let's check the second pair:$$ 2 \times \begin{bmatrix}0 \\ 1\end{bmatrix} = \begin{bmatrix}0 \\ 2\end{bmatrix} \;\;\;and\;\;\; \begin{bmatrix}2 & 0\\0 & 2\end{bmatrix} \cdot \begin{bmatrix}0 \\ 1\end{bmatrix} = \begin{bmatrix}0 \\ 2\end{bmatrix} $$Now let's use Python to verify and plot these transformations:
###Code
vec1 = eVecs[:,0]
lam1 = eVals[0]
print('Matrix A:')
print(A)
print('-------')
print('lam1: ' + str(lam1))
print ('v1: ' + str(vec1))
print ('Av1: ' + str(A@vec1))
print ('lam1 x v1: ' + str(lam1*vec1))
print('-------')
vec2 = eVecs[:,1]
lam2 = eVals[1]
print('lam2: ' + str(lam2))
print ('v2: ' + str(vec2))
print ('Av2: ' + str(A@vec2))
print ('lam2 x v2: ' + str(lam2*vec2))
# Plot the resulting vectors
t1 = lam1*vec1
t2 = lam2*vec2
fig = plt.figure()
a=fig.add_subplot(1,1,1)
# Plot v and t1
vecs = np.array([t1,vec1])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
a=fig.add_subplot(1,2,1)
# Plot v and t2
vecs = np.array([t2,vec2])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
Let's take a look at one more, slightly more complex example. Here's our matrix:$$\begin{bmatrix}2 & 1\\1 & 2\end{bmatrix}$$Let's get the eigenvalue and eigenvector pairs:
###Code
import numpy as np
A = np.array([[2,1],
[1,2]])
eVals, eVecs = np.linalg.eig(A)
print(eVals)
print(eVecs)
###Output
_____no_output_____
###Markdown
This time the eigenvalue-eigenvector pairs are:$$ \lambda_{1} = 3, \vec{v_{1}} = \begin{bmatrix}0.70710678 \\ 0.70710678\end{bmatrix} \;\;\;\;\;\; \lambda_{2} = 1, \vec{v_{2}} = \begin{bmatrix}-0.70710678 \\ 0.70710678\end{bmatrix} $$So let's check the first pair:$$ 3 \times \begin{bmatrix}0.70710678 \\ 0.70710678\end{bmatrix} = \begin{bmatrix}2.12132034 \\ 2.12132034\end{bmatrix} \;\;\;and\;\;\; \begin{bmatrix}2 & 1\\1 & 2\end{bmatrix} \cdot \begin{bmatrix}0.70710678 \\ 0.70710678\end{bmatrix} = \begin{bmatrix}2.12132034 \\ 2.12132034\end{bmatrix} $$Now let's check the second pair:$$ 1 \times \begin{bmatrix}-0.70710678 \\ 0.70710678\end{bmatrix} = \begin{bmatrix}-0.70710678\\0.70710678\end{bmatrix} \;\;\;and\;\;\; \begin{bmatrix}2 & 1\\1 & 2\end{bmatrix} \cdot \begin{bmatrix}-0.70710678 \\ 0.70710678\end{bmatrix} = \begin{bmatrix}-0.70710678\\0.70710678\end{bmatrix} $$With more complex examples like this, it's generally easier to do it with Python:
###Code
vec1 = eVecs[:,0]
lam1 = eVals[0]
print('Matrix A:')
print(A)
print('-------')
print('lam1: ' + str(lam1))
print ('v1: ' + str(vec1))
print ('Av1: ' + str(A@vec1))
print ('lam1 x v1: ' + str(lam1*vec1))
print('-------')
vec2 = eVecs[:,1]
lam2 = eVals[1]
print('lam2: ' + str(lam2))
print ('v2: ' + str(vec2))
print ('Av2: ' + str(A@vec2))
print ('lam2 x v2: ' + str(lam2*vec2))
# Plot the results
t1 = lam1*vec1
t2 = lam2*vec2
fig = plt.figure()
a=fig.add_subplot(1,1,1)
# Plot v and t1
vecs = np.array([t1,vec1])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
a=fig.add_subplot(1,2,1)
# Plot v and t2
vecs = np.array([t2,vec2])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['blue', 'orange'], scale=10)
plt.show()
###Output
_____no_output_____
###Markdown
EigendecompositionSo we've learned a little about eigenvalues and eigenvectors; but you may be wondering what use they are. Well, one use for them is to help decompose transformation matrices.Recall that previously we found that a matrix transformation of a vector changes its magnitude, amplitude, or both. Without getting too technical about it, we need to remember that vectors can exist in any spatial orientation, or *basis*; and the same transformation can be applied in different *bases*.We can decompose a matrix using the following formula:$$A = Q \Lambda Q^{-1}$$Where ***A*** is a transformation that can be applied to a vector in its current base, ***Q*** is a matrix of eigenvectors that defines a change of basis, and ***Λ*** is a matrix with eigenvalues on the diagonal that defines the same linear transformation as ***A*** in the base defined by ***Q***.Let's look at these in some more detail. Consider this matrix:$$A=\begin{bmatrix}3 & 2\\1 & 0\end{bmatrix}$$***Q*** is a matrix in which each column is an eigenvector of ***A***; which as we've seen previously, we can calculate using Python:
###Code
import numpy as np
A = np.array([[3,2],
[1,0]])
l, Q = np.linalg.eig(A)
print(Q)
###Output
_____no_output_____
###Markdown
So for matrix ***A***, ***Q*** is the following matrix:$$Q=\begin{bmatrix}0.96276969 & -0.48963374\\0.27032301 & 0.87192821\end{bmatrix}$$***Λ*** is a matrix that contains the eigenvalues for ***A*** on the diagonal, with zeros in all other elements; so for a 2x2 matrix, Λ will look like this:$$\Lambda=\begin{bmatrix}\lambda_{1} & 0\\0 & \lambda_{2}\end{bmatrix}$$In our Python code, we've already used the ***linalg.eig*** function to return the array of eigenvalues for ***A*** into the variable ***l***, so now we just need to format that as a matrix:
###Code
L = np.diag(l)
print (L)
###Output
_____no_output_____
###Markdown
So ***Λ*** is the following matrix:$$\Lambda=\begin{bmatrix}3.56155281 & 0\\0 & -0.56155281\end{bmatrix}$$Now we just need to find ***Q-1***, which is the inverse of ***Q***:
###Code
Qinv = np.linalg.inv(Q)
print(Qinv)
###Output
_____no_output_____
###Markdown
The inverse of ***Q*** then, is:$$Q^{-1}=\begin{bmatrix}0.89720673 & 0.50382896\\-0.27816009 & 0.99068183\end{bmatrix}$$So what does that mean? Well, it means that we can decompose the transformation of *any* vector multiplied by matrix ***A*** into the separate operations ***QΛQ-1***:$$A\vec{v} = Q \Lambda Q^{-1}\vec{v}$$To prove this, let's take vector ***v***:$$\vec{v} = \begin{bmatrix}1\\3\end{bmatrix} $$Our matrix transformation using ***A*** is:$$\begin{bmatrix}3 & 2\\1 & 0\end{bmatrix} \cdot \begin{bmatrix}1\\3\end{bmatrix} $$So let's show the results of that using Python:
###Code
v = np.array([1,3])
t = A@v
print(t)
# Plot v and t
vecs = np.array([v,t])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['orange', 'b'], scale=20)
plt.show()
###Output
_____no_output_____
###Markdown
And now, let's do the same thing using the ***QΛQ-1*** sequence of operations:
###Code
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
t = (Q@(L@(Qinv)))@v
# Plot v and t
vecs = np.array([v,t])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['orange', 'b'], scale=20)
plt.show()
###Output
_____no_output_____
###Markdown
So ***A*** and ***QΛQ-1*** are equivalent.If we view the intermediary stages of the decomposed transformation, you can see the transformation using ***A*** in the original base for ***v*** (orange to blue) and the transformation using ***Λ*** in the change of basis described by ***Q*** (red to magenta):
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
t1 = Qinv@v
t2 = L@t1
t3 = Q@t2
# Plot the transformations
vecs = np.array([v,t1, t2, t3])
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, vecs[:,0], vecs[:,1], color=['orange', 'red', 'magenta', 'blue'], scale=20)
plt.show()
###Output
_____no_output_____
###Markdown
So from this visualization, it should be apparent that the transformation ***Av*** can be performed by changing the basis for ***v*** using ***Q*** (from orange to red in the above plot), applying the equivalent linear transformation in that base using ***Λ*** (red to magenta), and switching back to the original base using ***Q-1*** (magenta to blue). Rank of a MatrixThe **rank** of a square matrix is the number of non-zero eigenvalues of the matrix. A **full rank** matrix has the same number of non-zero eigenvalues as the dimension of the matrix. A **rank-deficient** matrix has fewer non-zero eigenvalues than dimensions. A rank-deficient matrix is singular, so its inverse does not exist (this is why in a previous notebook we noted that some matrices have no inverse).Consider the following matrix ***A***:$$A=\begin{bmatrix}1 & 2\\4 & 3\end{bmatrix}$$Let's find its eigenvalues (***Λ***):
###Code
import numpy as np
A = np.array([[1,2],
[4,3]])
l, Q = np.linalg.eig(A)
L = np.diag(l)
print(L)
###Output
_____no_output_____
###Markdown
$$\Lambda=\begin{bmatrix}-1 & 0\\0 & 5\end{bmatrix}$$This matrix has full rank. The dimension of the matrix is 2. There are two non-zero eigenvalues. Now consider this matrix:$$B=\begin{bmatrix}3 & -3 & 6\\2 & -2 & 4\\1 & -1 & 2\end{bmatrix}$$Note that the second and third columns are just scalar multiples of the first column.Let's examine its eigenvalues:
###Code
B = np.array([[3,-3,6],
[2,-2,4],
[1,-1,2]])
lb, Qb = np.linalg.eig(B)
Lb = np.diag(lb)
print(Lb)
###Output
_____no_output_____
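As a quick cross-check of the eigenvalues computed above (a minimal sketch, assuming the matrix `B` from the previous cell is still defined), numpy can report the rank directly:

```python
# The rank counts the linearly independent columns; for B it should be 1
print(np.linalg.matrix_rank(B))
```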
###Markdown
$$\Lambda=\begin{bmatrix}3 & 0 & 0\\0 & -6\times10^{-17} & 0\\0 & 0 & 3.6\times10^{-16}\end{bmatrix}$$Note that this matrix has only 1 non-zero eigenvalue. The other two eigenvalues are so extremely small as to be effectively zero. This is an example of a rank-deficient matrix; and as such, it has no inverse. Inverse of a Square Full Rank MatrixYou can calculate the inverse of a square full rank matrix by using the following formula:$$A^{-1} = Q \Lambda^{-1} Q^{-1}$$Let's apply this to matrix ***A***:$$A=\begin{bmatrix}1 & 2\\4 & 3\end{bmatrix}$$Let's find the matrices for ***Q***, ***Λ-1***, and ***Q-1***:
###Code
import numpy as np
A = np.array([[1,2],
[4,3]])
l, Q = np.linalg.eig(A)
L = np.diag(l)
print(Q)
Linv = np.linalg.inv(L)
Qinv = np.linalg.inv(Q)
print(Linv)
print(Qinv)
###Output
_____no_output_____
###Markdown
So:$$A^{-1}=\begin{bmatrix}-0.70710678 & -0.4472136\\0.70710678 & -0.89442719\end{bmatrix}\cdot\begin{bmatrix}-1 & -0\\0 & 0.2\end{bmatrix}\cdot\begin{bmatrix}-0.94280904 & 0.47140452\\-0.74535599 & -0.74535599\end{bmatrix}$$Let's calculate that in Python:
###Code
Ainv = (Q@(Linv@(Qinv)))
print(Ainv)
###Output
_____no_output_____
###Markdown
That gives us the result:$$A^{-1}=\begin{bmatrix}-0.6 & 0.4\\0.8 & -0.2\end{bmatrix}$$We can apply the ***np.linalg.inv*** function directly to ***A*** to verify this:
###Code
print(np.linalg.inv(A))
###Output
_____no_output_____ |
covid_alberta.ipynb | ###Markdown
Analysis & Visualization of COVID-19 in Alberta Import libraries
###Code
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import numpy as np
import seaborn as sns
import io
import requests
###Output
_____no_output_____
###Markdown
Read the datasetThe datasets used in this notebook are from the Government of Canada's [COVID-19 daily epidemiology update](https://health-infobase.canada.ca/covid-19/epidemiological-summary-covid-19-cases.html) and the Alberta Government's [COVID-19 Alberta statistics](https://www.alberta.ca/stats/covid-19-alberta-statistics.htm). The epidemiology update contains daily COVID-19 cases for the Canadian provinces and territories. This notebook uses the Alberta statistics dataset to show the daily COVID-19 cases by demography. All other analysis is done using the Government of Canada's daily epidemiology update data.Links to download datasets:* Canada dataset: https://health-infobase.canada.ca/src/data/covidLive/covid19-download.csv* Alberta dataset: https://www.alberta.ca/data/stats/covid-19-alberta-statistics-data.csv ---Interesting columns from the Government of Canada's dataset:* prname, Province name* date, Report date* numconf, Number of confirmed cases* numdeaths, Number of deaths* numtotal, Total number of cases, "Calculated by number of confirmed cases plus number of probable cases (numconf+numprob) "* numtested, Number of individuals tested* numrecover, Number of recovered cases* percentrecover, Percentage of recovered cases* ratetested, Testing rate per one million population, "Calculated by the number of individuals tested for a P/T divided by the population of that P/T[(numtested/population) x1,000,000]"* numtoday, Number of new cases since last update, Number of total counts from last update subtracted from total counts of current update* percentoday, Percent change since last update, "Calculated by the number of new cases divided by the previous update's total number of cases[(numtoday/numtotal of previous update)x100]"* ratetotal, Case rate per one hundred thousand population, "Calculated by the total number of cases for a P/T divided by the population of that P/T[(numtotal/population) x100,000]"---Interesting columns from the Alberta Government's dataset:* Date reported* Alberta Health Services Zone* Gender * Age group * Case status * Case type
###Code
url = 'https://health-infobase.canada.ca/src/data/covidLive/covid19-download.csv'
df = pd.read_csv(url)
url = 'https://www.alberta.ca/data/stats/covid-19-alberta-statistics-data.csv'
s = requests.get(url).content
df_demographic = pd.read_csv(io.StringIO(s.decode('utf-8')))
###Output
_____no_output_____
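Only a handful of the columns listed above are used later; a quick way to preview them after loading (a minimal sketch using column names from the description above):

```python
# Preview a few columns of interest from the Canada-wide dataset
cols = ['prname', 'date', 'numconf', 'numdeaths', 'numtotal', 'numtoday']
df[cols].tail()
```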
###Markdown
Filter daily epidemiology update dataset to Alberta
###Code
alberta_df = df[df['prname'] == 'Alberta'].copy()
alberta_df.tail(5)
###Output
_____no_output_____
###Markdown
Exploratory data analysis Basic metrics
###Code
canada_df = df[df['prname'] == 'Canada'].copy()
latest_alberta_data = alberta_df.tail(1).copy()
latest_canada_data = canada_df.tail(1).copy()
total_deaths_alberta = latest_alberta_data['numdeaths']
active_cases_alberta = latest_alberta_data['numactive']
latest_reported_daily_cases_alberta = latest_alberta_data['numtoday']
latest_reported_daily_tests_alberta = latest_alberta_data['numteststoday']
active_cases_canada = latest_canada_data['numactive']
latest_reported_daily_cases_canada = latest_canada_data['numtoday']
latest_reported_daily_tests_canada = latest_canada_data['numteststoday']
compare_active_cases = (float(active_cases_alberta) / float(active_cases_canada)) * 100
compare_latest_reported_daily_cases = (float(latest_reported_daily_cases_alberta) / float(latest_reported_daily_cases_canada)) * 100
latest_positivity_rate_alberta = (float(latest_reported_daily_cases_alberta) / float(latest_reported_daily_tests_alberta)) * 100
latest_positivity_rate_canada = (float(latest_reported_daily_cases_canada) / float(latest_reported_daily_tests_canada)) * 100
alberta_population = 4444277
canada_population = 38131104
alberta_canada_percentage = (alberta_population / canada_population) * 100
diff = compare_active_cases / alberta_canada_percentage
print("Basic metrics about COVID in Alberta\n====================================\n")
print("Total deaths in Alberta is {:.0f}\n".format(float(total_deaths_alberta)))
print("Active cases in Alberta is {:.0f}".format(float(active_cases_alberta)))
print("Active cases in Canaada {:.0f}".format(float(active_cases_canada)))
print("Alberta makes up {:.1f}% of the active cases in Canada\n".format(compare_active_cases))
print("Latest reported daily cases in Alberta is {:.0f}".format(float(latest_reported_daily_cases_alberta)))
print("Latest reported daily cases in Canada is {:.0f}".format(float(latest_reported_daily_cases_canada)))
print("Alberta makes up {:.1f}% of the latest reported daily cases in Canada\n".format(compare_latest_reported_daily_cases))
print("Latest reported daily tests in Alberta is {:.0f}".format(float(latest_reported_daily_tests_alberta)))
print("Latest positivity rate in Alberta is {:.1f}%".format(latest_positivity_rate_alberta))
print("Latest positivity rate in Canada is {:.1f}%\n".format(latest_positivity_rate_canada))
print("Alberta is {:.1f}% of Canada's population and has {:.1f}% of the active cases in Canada".format(alberta_canada_percentage, compare_active_cases))
print("Active cases in Alberta is {:.1f}x higher relative to the province's population".format(diff))
###Output
Basic metrics about COVID in Alberta
====================================
Total deaths in Alberta is 2390
Active cases in Alberta is 13495
Active cases in Canada is 34656
Alberta makes up 38.9% of the active cases in Canada
Latest reported daily cases in Alberta is 1401
Latest reported daily cases in Canada is 4161
Alberta makes up 33.7% of the latest reported daily cases in Canada
Latest reported daily tests in Alberta is 12139
Latest positivity rate in Alberta is 11.5%
Latest positivity rate in Canada is 4.7%
Alberta is 11.7% of Canada's population and has 38.9% of the active cases in Canada
Active cases in Alberta is 3.3x higher relative to the province's population
###Markdown
Children metricsI define children as individuals between 0 and 19 years old. My definition is restricted by the way Alberta has broken out age groups (i.e., under 1 year, 1 - 4 years, 5 - 9 years, and 10 - 19 years).
###Code
last_date = pd.to_datetime(latest_alberta_data['date']) - pd.DateOffset(1)
last_date = last_date.astype(str).values[0]
filtered_df_demographic = df_demographic[(df_demographic['Case type'] == 'Confirmed') & (df_demographic['Case status'] != 'Recovered') & (df_demographic['Date reported'] == last_date)].copy()
filtered_df_demographic = filtered_df_demographic.groupby('Age group').count()['Date reported'].copy()
latest_children = filtered_df_demographic['Under 1 year'] + filtered_df_demographic['1-4 years'] + filtered_df_demographic['5-9 years'] + filtered_df_demographic['10-19 years']
children_percent = (latest_children / filtered_df_demographic.sum()) * 100
children_under_1_percent = (filtered_df_demographic['Under 1 year'] / filtered_df_demographic.sum()) * 100
latest_children_1_to_9 = filtered_df_demographic['1-4 years'] + filtered_df_demographic['5-9 years']
children_1_to_9_percent = (latest_children_1_to_9 / filtered_df_demographic.sum()) * 100
children_10_to_19_percent = (filtered_df_demographic['10-19 years'] / filtered_df_demographic.sum()) * 100
print("COVID-19 cases affecting children in Alberta\n============================================\n")
print("Cases with children under 1 year is {:.0f} which is {:.1f}% of Alberta's total daily cases".format(filtered_df_demographic['Under 1 year'], children_under_1_percent))
print("Cases with children 1 - 9 years is {:.0f} which is {:.1f}% of Alberta's total daily cases".format(latest_children_1_to_9, children_1_to_9_percent))
print("Cases with children 10 - 19 years is {:.0f} which is {:.1f}% of Alberta's total daily cases\n".format(filtered_df_demographic['10-19 years'], children_10_to_19_percent))
print("Cases with children 0 - 19 years is {:.0f} which is {:.1f}% of Alberta's total daily cases\n".format(latest_children, children_percent))
###Output
COVID-19 cases affecting children in Alberta
============================================
Cases with children under 1 year is 7 which is 0.5% of Alberta's total daily cases
Cases with children 1 - 9 years is 125 which is 8.9% of Alberta's total daily cases
Cases with children 10 - 19 years is 169 which is 12.0% of Alberta's total daily cases
Cases with children 0 - 19 years is 301 which is 21.4% of Alberta's total daily cases
###Markdown
Visualization
###Code
# Remove data errors
alberta_df = alberta_df[alberta_df['numtoday'] != 0]
dates = alberta_df['date']
date_format = [pd.to_datetime(d) for d in dates]
# Calculate positivity rate and 7 day rolling means for cases, tests, and positivity rate
alberta_df['numtoday_7avg'] = alberta_df['numtoday'].rolling(7).mean()
alberta_df['numteststoday_7avg'] = alberta_df['numteststoday'].rolling(7).mean()
alberta_df['positivity_rate_7avg'] = (alberta_df['numtoday_7avg'] / alberta_df['numteststoday_7avg']) * 100
alberta_df['positivity_rate'] = (alberta_df['numtoday'] / alberta_df['numteststoday']) * 100
fig, ax = plt.subplots(figsize=(18,6))
ax.grid()
ax.scatter(date_format, alberta_df['numtoday'], label = 'Daily cases')
ax.plot(date_format, alberta_df['numtoday_7avg'], color = 'red', linewidth = 4, label = '7-day rolling average for daily cases')
ax.set(title = 'Daily COVID-19 cases in Alberta', xlabel = 'Date', ylabel = 'COVID-19 cases')
fig.legend(loc = 'lower right')
fig.show()
###Output
_____no_output_____
###Markdown
Positivity rate provides an alternative view of COVID-19's progression. Using the 7-day rolling averages smooths out unwanted data anomalies.
###Code
fig, ax = plt.subplots(figsize=(18,6))
ax.grid()
ax.scatter(date_format, alberta_df['positivity_rate'], label = 'positivity rate')
ax.plot(date_format, alberta_df['positivity_rate_7avg'], color = 'red', linewidth = 4, label = '7-day rolling average positivity rate')
ax.set(title = 'Positivity rate COVID-19 cases in Alberta', xlabel = 'Date', ylabel = 'Positivity rate %')
fig.legend(loc = 'lower right')
fig.show()
###Output
_____no_output_____
###Markdown
Correlation between positivity rate and active cases
###Code
fig, ax1 = plt.subplots(figsize=(18,6))
ax1.grid()
ax1.plot(date_format, alberta_df['numactive'], color = 'deepskyblue', linewidth = 4, label = 'Active cases')
ax1.set(title = 'Correlation between positivity rate and active COVID-19 cases in Alberta', xlabel = 'Date', ylabel = 'Active cases')
ax2 = ax1.twinx()
ax2.set_ylabel('Positivity rate %')
ax2.plot(date_format, alberta_df['positivity_rate_7avg'], color = 'red', linewidth = 4, label = '7-day rolling average positivity rate')
fig.legend(loc = 'lower right')
plt.show()
# Sort age groups
age_groups = ['Under 1 year', '1-4 years', '5-9 years', '10-19 years', '20-29 years', '30-39 years', '40-49 years', '50-59 years', '60-69 years', '70-79 years', '80+ years']
mapping = {age: i for i, age in enumerate(age_groups)}
key = filtered_df_demographic.index.map(mapping)
filtered_df_demographic = filtered_df_demographic.iloc[key.argsort()]
plt.subplots(figsize=(18,6))
ax = filtered_df_demographic.plot.bar()
ax.set_ylabel('COVID-19 cases')
ax.set_title('Daily COVID-19 cases in Alberta by age group')
rects = ax.patches
labels = filtered_df_demographic
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(
rect.get_x() + rect.get_width() / 2, height + 5, label, ha="center", va="bottom"
)
plt.show()
print("Daily COVID-19 Cases in Alberta by age group\n============================================")
filtered_df_demographic
###Output
Daily COVID-19 Cases in Alberta by age group
============================================
|
_notebooks/preimaging/preimaging_01_mirage.ipynb | ###Markdown
NIRSpec Pre-Imaging with NIRCam **Use case:** Simulation of NIRCam pre-imaging for NIRSpec.**Data:** JWST simulated NIRCam data from MIRAGE; LMC.**Tools:** mirage, jwst, astropy, grismconf, nircam_gsim.**Cross-instrument:** NIRCam. **Documentation:** This notebook is part of STScI's larger [post-pipeline Data Analysis Tools Ecosystem](https://jwst-docs.stsci.edu/jwst-post-pipeline-data-analysis). IntroductionThis notebook shows step-by-step instructions to simulate images of the JWST LMC astrometric calibration field. The NIRCam images are simulated using the software [MIRAGE](https://jwst-docs.stsci.edu/jwst-other-tools/mirage-data-simulator). The observation is designed in APT. The APT output is used as input of MIRAGE.This notebook must be executed from an environment that has MIRAGE installed. Follow the instructions in the [Installing MIRAGE webpage](https://mirage-data-simulator.readthedocs.io/en/latest/install.html) before executing this Jupyter Notebook.
###Code
import os
from glob import glob
import shutil
import yaml
import zipfile
import urllib.request
# mirage imports
from mirage import imaging_simulator
from mirage.seed_image import catalog_seed_image
from mirage.dark import dark_prep
from mirage.ramp_generator import obs_generator
from mirage.yaml import yaml_generator
from astropy.table import Table
from astropy.io import fits
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Setting things up After activating the environment with MIRAGE and starting a Jupyter Notebook session, we begin by defining the working directory.
###Code
path = './.'  # set this to your working directory
os.chdir(path)
pwd
###Output
_____no_output_____
###Markdown
*Developer Note:*Find out a way to install the mirage data for the testing CI. Right now the data size is too large. Mirage is accompanied by a set of reference files that are used to construct the simulated data. Here we define the location of the MIRAGE data. This is the directory that contains the reference files associated with MIRAGE. For users at STScI, this is the location of MIRAGE data:
###Code
if os.environ.get('MIRAGE_DATA', None) is None:
os.environ['MIRAGE_DATA'] = '/path/to/mirage_data/'
###Output
_____no_output_____
###Markdown
If the user is outside of STScI then the reference files must be downloaded using the "downloader" module. Please follow the instructions in https://mirage-data-simulator.readthedocs.io/en/latest/reference_files.html and create an appropriate MIRAGE_DATA location. Download Data
###Code
boxlink = 'https://data.science.stsci.edu/redirect/JWST/jwst-data_analysis_tools/preimaging_notebooks/preimaging.zip'
boxfile = './preimaging.zip'
# Download zip file
if not os.path.exists( boxfile):
urllib.request.urlretrieve(boxlink, boxfile)
zf = zipfile.ZipFile(boxfile, 'r')
zf.extractall()
###Output
_____no_output_____
###Markdown
Generating input yaml filesWe begin the simulation using the programme's APT file. The xml and pointings files must be exported from APT, and are then used as input to the yaml_generator, which will generate a series of yaml input files.From APT we export two files: the xml and pointing files. These should be in the working directory.
###Code
# Specify the xml and pointing files exported from APT
xml_file = os.path.join('preimaging', 'NRC21_pid1069_2018_rev2.xml')
pointing_file = os.path.join('preimaging', 'NRC21_pid1069_2018_rev2.pointing')
###Output
_____no_output_____
###Markdown
Additional optional data to be included.
###Code
# Optionally set the telescope roll angle (PAV3) for the observations
pav3=0.0
# Define the output directory
output_dir = path
###Output
_____no_output_____
###Markdown
In this example we create NIRCam images based on a catalogue (all_filters_lmc.cat) of point sources. This catalogue contains the AB magnitude of each source in the following six filters: F070W, F150W, F200W, F277W, F356W, and F444W. The dictionary of catalogs must use the APT target names as keys, for example `LMC-ASTROMETRIC-FIELD`. Full details on yaml_generator input options are given here: https://mirage-data-simulator.readthedocs.io/en/latest/yaml_generator.html This is what the input catalogue looks like. Space separated values with an uncommented header line. ``` position_RA_Dec abmag index x_or_RA y_or_Dec nircam_f070w_magnitude nircam_f150w_magnitude nircam_f200w_magnitude nircam_f277w_magnitude nircam_f356w_magnitude nircam_f444w_magnitude1 80.386396453731 -69.468909240644 21.63889 21.59946 21.93288 22.51786 22.99632 23.42552 80.385587687224 -69.469200540277 20.42033 20.05396 20.32926 20.92191 21.37946 21.833213 80.38036547567 -69.470930464875 21.8158 21.86888 22.2175 22.8008 23.28381 23.70644 80.388130492656 -69.468453170293 21.11582 20.8028 21.08802 21.67932 22.14077 22.590485 80.388935773363 -69.468195831029 21.76617 21.80178 22.14757 22.73117 23.21336 23.63717``` For more information look at the following link https://github.com/spacetelescope/mirage/blob/master/examples/Catalog_Generation_Tools.ipynb
###Code
# Source catalogs to be used
cat_dict = { 'LMC-ASTROMETRIC-FIELD': {'nircam': {'point_source': 'preimaging/all_filters_lmc.cat'} ,
'fgs': {'point_source': 'dummy.cat'} } ,
'2 LMC-ASTROMETRIC-FIELD': {'nircam': {'point_source': 'preimaging/all_filters_lmc.cat'} ,
'fgs': {'point_source': 'dummy.cat'} } }
###Output
_____no_output_____
###Markdown
Running the yaml_generatorThis will create a collection of yaml files that will be used as input when creating the simulated data. There will be one yaml file for each detector and exposure, so there can be quite a few files created if your programme has lots of exposures or dithers. This LMC programme will generate 528 files using six NIRCam filters and the JWST FGS.
###Code
# Run the yaml generator
yam = yaml_generator.SimInput(xml_file, pointing_file,
catalogs=cat_dict,
verbose=True,
simdata_output_dir=output_dir,
output_dir=output_dir,
roll_angle=pav3,
# to do : explain linear vs raw
datatype='linear,raw')
yam.use_linearized_darks = True
yam.create_inputs()
###Output
_____no_output_____
###Markdown
Organizing files according to filter These notebooks will generate a large amount of data and it is useful to keep it organized in sub directories: yaml: all the yaml files organized according to filter; mirage_output: linear and uncal files; pipeline_level1: rate files; pipeline_level2: cal files
###Code
path = os.getcwd()
files = glob('jw*yaml')
allfiles = glob('jw*')
if not os.path.exists(os.path.join(path,'mirage_output')):
os.mkdir(os.path.join(path,'mirage_output'))
if not os.path.exists(os.path.join(path,'pipeline_level1')):
os.mkdir(os.path.join(path,'pipeline_level1'))
if not os.path.exists(os.path.join(path,'pipeline_level2')):
os.mkdir(os.path.join(path,'pipeline_level2'))
if not os.path.exists(os.path.join(path,'yaml')):
os.mkdir(os.path.join(path,'yaml'))
###Output
_____no_output_____
###Markdown
Here we store the yaml files in the yaml directory organized according to filter. The cell below will fail if the files have already been relocated before. If you want to intentionally re-do this step, please manually remove the previous files from the output directory.
###Code
# we organize files according to filter
for yamlfile in files:
with open(yamlfile, 'r') as stream: #open the yaml file in read mode
doc = yaml.load(stream, Loader=yaml.FullLoader)
filtname = doc['Readout']['filter'] #read the filter keyword
if not os.path.exists(os.path.join(path,'yaml',filtname.lower())):
os.mkdir(os.path.join(path,'yaml',filtname.lower()))
filetomove = yamlfile
input_file = filetomove
output_file = os.path.join(path,'yaml',filtname.lower())
print('input = ',input_file)
print('output = ',output_file)
shutil.move(input_file, output_file) #move the file to the corresponding sub directory
###Output
_____no_output_____
###Markdown
Execute MIRAGE and create simulated dataNow that the yaml files have been generated, we can execute MIRAGE using them as input parameters and generate the NIRCam images. As an example, let us choose filter F150W. We are going to simulate all of the images that were observed using filter F150W. The variable "listname" contains the names of the yaml files that we want to process through MIRAGE. There are 128 F150W yaml files.
###Code
# input parameters
filtname = 'f150w'
# copy the F150W yaml files back in the working directory
cwd = os.getcwd()
filter_pattern = os.path.join(cwd,'yaml',filtname.lower(),'jw*yaml')
files = glob(filter_pattern)[:]
listname = files
for yamlfile in files:
input_file = yamlfile
output_file = cwd
print('input = ',input_file)
print('output = ',output_file)
shutil.copy(input_file, output_file) #this copies over filter files
# read the list of yaml files to process
t = Table.read(listname, format='ascii.fast_no_header')
input_yaml = t['col1']
yaml_list = []
for k in range(len(input_yaml)):
yaml_list.append(input_yaml[k])
print(yaml_list)
files = yaml_list
paramlist = yaml_list
print(files)
###Output
_____no_output_____
###Markdown
From each yaml file, Mirage will produce a noiseless seed image, a "raw" [(level 1b) file](https://jwst-pipeline.readthedocs.io/en/stable/jwst/data_products/science_products.html?highlight=uncaluncalibrated-raw-data-uncal), and a linearized ramp (equivalent to the output of the linearity correction step of the [calwebb_detector1 pipeline](https://jwst-pipeline.readthedocs.io/en/stable/jwst/pipeline/calwebb_detector1.html))
###Code
# here we loop over the yaml files and run MIRAGE in a sequence
# this step will take a long time to run
for yamlfile in files:
print('---------------------processing ',yamlfile,' -------------------------------')
# run Mirage
sim = imaging_simulator.ImgSim()
sim.paramfile = yamlfile
sim.create()
###Output
_____no_output_____
###Markdown
Examine the outputHere we display the output files generated by MIRAGE. The UNCAL file is the raw uncalibrated file. Seed imageThe seed image contains only the signal from the astronomical sources and background. There are no detector effects, nor cosmic rays added to this count rate image.
###Code
def show(array,title,min=0,max=1000):
plt.figure(figsize=(12,12))
plt.imshow(array,clim=(min,max))
plt.title(title)
plt.colorbar().set_label('DN$^{-}$/s')
seed_file = 'jw01069001001_01101_00003_nrca2_uncal_F150W_final_seed_image.fits'
with fits.open(seed_file) as hdulist:
seed_data = hdulist[1].data
print(seed_data.shape)
show(seed_data,'Seed Image',max=5)
###Output
_____no_output_____
###Markdown
Linear file exampleMIRAGE generates the linear and uncalibrated files. Here we display an example linear file.
###Code
linear_file = 'jw01069001001_01101_00003_nrca2_linear.fits'
with fits.open(linear_file) as hdulist:
linear_data = hdulist['SCI'].data
print(linear_data.shape)
# this image has five groups
# we display the last group
show(linear_data[0, 4, :, :], "Final Group linear file", max=250)
###Output
_____no_output_____
###Markdown
Raw uncalibrated file exampleFirst let us display a single group, which is dominated by noise and detector artifacts.
###Code
raw_file = 'jw01069001001_01101_00003_nrca2_uncal.fits'
with fits.open(raw_file) as hdulist:
raw_data = hdulist['SCI'].data
print(raw_data.shape)
# the image has five groups. Here we display the last group
show(raw_data[0, 4, :, :], "Final Group uncal file", max=15000)
###Output
_____no_output_____
###Markdown
Many of the instrumental artifacts can be removed by looking at the difference between two groups. Raw data values are integers, so first make the data floats before doing the subtraction.
###Code
show(1. * raw_data[0, 4, :, :] - 1. * raw_data[0, 0, :, :], "Last Minus First Group uncal file", max=200)
###Output
_____no_output_____ |
yukawa/yukawa.ipynb | ###Markdown
Custom Nonbonded Potential: Yukawa on rigid bodiesHere we define a custom force class where particles interact through a Yukawa potential and a soft repulsion,\begin{equation}w(r) / k_BT = \frac{\lambda_Bz_iz_j}{r}e^{-r/\lambda_D} + 4\beta\epsilon_{ij} \left ( \frac{\sigma_{ij}}{r}\right )^{12}\end{equation}where $\lambda_B=e^2/(4\pi\epsilon_0\epsilon_rk_BT)$ and $\lambda_D=(4\pi\lambda_B\sum \rho_iz_i^2)^{-1/2}$ are the Bjerrum and Debye lengths, respectively. $\rho_i$ is the number density of the $i$th ion.In this example we also create two rigid bodies, using harmonic bonds to constrain the positions.Some comments:1. The potential is defined via `CustomNonbonded` in `cg.zml` and must return energy in `kJ/mol`.2. The Bjerrum and Debye lengths are set via global parameters.
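For reference, here is a minimal sketch of how an equivalent energy expression could be built directly in Python with OpenMM's `CustomNonbondedForce` (the actual potential used below lives in the forcefield XML; the per-particle parameter names `z`, `sigma` and `eps`, and the explicit `kT` prefactor used to return kJ/mol, are illustrative assumptions):

```python
import simtk.openmm as mm
from simtk import unit

# kT in kJ/mol at 300 K, to convert the reduced (w/kBT) expression to an energy
kT = (unit.MOLAR_GAS_CONSTANT_R * 300 * unit.kelvin).value_in_unit(unit.kilojoule_per_mole)

energy = ("kT*lB*z1*z2/r*exp(-kappa*r) + 4*eps*(sigma/r)^12;"
          "sigma=0.5*(sigma1+sigma2); eps=sqrt(eps1*eps2)")
force = mm.CustomNonbondedForce(energy)
force.addGlobalParameter('kT', kT)       # kJ/mol
force.addGlobalParameter('lB', 0.7)      # Bjerrum length, nm
force.addGlobalParameter('kappa', 0.0)   # inverse Debye length, 1/nm
for name in ('z', 'sigma', 'eps'):       # per-particle charge, size and softness
    force.addPerParticleParameter(name)
```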
###Code
%matplotlib inline
import numpy as np
from __future__ import print_function
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
from sys import stdout, exit
import math
import mdtraj as mdtraj
from itertools import combinations
###Output
_____no_output_____
###Markdown
Simulation setup
###Code
cutoff = 50*unit.angstrom
useMinimize = True
epsilon_r = 80.
temperature = 300*unit.kelvin
kT = unit.BOLTZMANN_CONSTANT_kB*temperature
timestep = 10*unit.femtoseconds;
steps_eq = 5000
steps_production = 2e4
steps_total = steps_eq + steps_production
###Output
_____no_output_____
###Markdown
Convenience functionsA set of independent functions, useful for setting up OpenMM.
###Code
def findForce(system, forcetype, add=True):
""" Finds a specific force in the system force list - added if not found."""
for force in system.getForces():
if isinstance(force, forcetype):
return force
if add==True:
system.addForce(forcetype())
return findForce(system, forcetype)
return None
def setGlobalForceParameter(force, key, value):
for i in range(force.getNumGlobalParameters()):
if force.getGlobalParameterName(i)==key:
print('setting force parameter', key, '=', value)
force.setGlobalParameterDefaultValue(i, value);
def atomIndexInResidue(residue):
""" list of atom index in residue """
index=[]
for a in list(residue.atoms()):
index.append(a.index)
return index
def getResiduePositions(residue, positions):
""" Returns array w. atomic positions of residue """
ndx = atomIndexInResidue(residue)
return np.array(positions)[ndx]
def uniquePairs(index):
""" list of unique, internal pairs """
return list(combinations( range(index[0],index[-1]+1),2 ) )
def addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k):
""" add harmonic bonds between pairs if distance is smaller than threshold """
print('Constraint force constant =', k)
for i,j in pairlist:
distance = unit.norm( positions[i]-positions[j] )
if distance<threshold:
harmonicforce.addBond( i,j,
distance.value_in_unit(unit.nanometer),
k.value_in_unit( unit.kilojoule/unit.nanometer**2/unit.mole ))
print("added harmonic bond between", i, j, 'with distance',distance)
def addExclusions(nonbondedforce, pairlist):
""" add nonbonded exclusions between pairs """
for i,j in pairlist:
nonbondedforce.addExclusion(i,j)
def rigidifyResidue(residue, harmonicforce, positions, nonbondedforce=None,
                    threshold=6.0*unit.angstrom, k=2500*unit.kilojoule/unit.nanometer**2/unit.mole):
    """ make residue rigid by adding constraints and nonbonded exclusions """
    index = atomIndexInResidue(residue)
    pairlist = uniquePairs(index)
    # use the arguments passed in rather than reaching for the global variables
    addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k)
    if nonbondedforce is not None:
        for i,j in pairlist:
            print('added nonbonded exclusion between', i, j)
            nonbondedforce.addExclusion(i,j)
def centerOfMass(positions, box):
""" Calculates the geometric center taking into account periodic boundaries
More here: https://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions
"""
theta=np.divide(positions, box).astype(np.float) * 2*np.pi
x1=np.array( [np.cos(theta[:,0]).mean(), np.cos(theta[:,1]).mean(), np.cos(theta[:,2]).mean()] )
x2=np.array( [np.sin(theta[:,0]).mean(), np.sin(theta[:,1]).mean(), np.sin(theta[:,2]).mean()] )
return box * (np.arctan2(-x1,-x2)+np.pi) / (2*np.pi)
###Output
_____no_output_____
###Markdown
Setup simulation
###Code
pdb = app.PDBFile('squares.pdb')
forcefield = app.ForceField('yukawa.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.CutoffPeriodic, nonbondedCutoff=cutoff )
box = np.array(pdb.topology.getPeriodicBoxVectors()).diagonal()
harmonic = findForce(system, mm.HarmonicBondForce)
nonbonded = findForce(system, mm.CustomNonbondedForce)
setGlobalForceParameter(nonbonded, 'lB', 0.7*unit.nanometer)
setGlobalForceParameter(nonbonded, 'kappa', 0.0)
for residue in pdb.topology.residues():
p = getResiduePositions(residue, pdb.positions)
print(centerOfMass(p, box))
rigidifyResidue(residue, harmonicforce=harmonic, nonbondedforce=nonbonded, positions=pdb.positions)
integrator = mm.LangevinIntegrator(temperature, 1.0/unit.picoseconds, timestep)
integrator.setConstraintTolerance(0.0001)
###Output
setting force parameter lB = 0.7 nm
setting force parameter kappa = 0.0
[Quantity(value=2.3, unit=nanometer) Quantity(value=2.3, unit=nanometer)
Quantity(value=2.5, unit=nanometer)]
Constraint force constant = 2500 kJ/(nm**2 mol)
added harmonic bond between 0 1 with distance 0.4 nm
added harmonic bond between 0 2 with distance 0.565685424949 nm
added harmonic bond between 0 3 with distance 0.4 nm
added harmonic bond between 1 2 with distance 0.4 nm
added harmonic bond between 1 3 with distance 0.565685424949 nm
added harmonic bond between 2 3 with distance 0.4 nm
added nonbonded exclusion between 0 1
added nonbonded exclusion between 0 2
added nonbonded exclusion between 0 3
added nonbonded exclusion between 1 2
added nonbonded exclusion between 1 3
added nonbonded exclusion between 2 3
[Quantity(value=2.3, unit=nanometer) Quantity(value=2.3, unit=nanometer)
Quantity(value=1.9000000000000001, unit=nanometer)]
Constraint force constant = 2500 kJ/(nm**2 mol)
added harmonic bond between 4 5 with distance 0.4 nm
added harmonic bond between 4 6 with distance 0.565685424949 nm
added harmonic bond between 4 7 with distance 0.4 nm
added harmonic bond between 5 6 with distance 0.4 nm
added harmonic bond between 5 7 with distance 0.565685424949 nm
added harmonic bond between 6 7 with distance 0.4 nm
added nonbonded exclusion between 4 5
added nonbonded exclusion between 4 6
added nonbonded exclusion between 4 7
added nonbonded exclusion between 5 6
added nonbonded exclusion between 5 7
added nonbonded exclusion between 6 7
###Markdown
Run simulation
###Code
simulation = app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
if useMinimize:
print('Minimizing...')
simulation.minimizeEnergy()
print('Equilibrating...')
simulation.context.setVelocitiesToTemperature(300*unit.kelvin)
simulation.step(steps_eq)
simulation.reporters.append(mdtraj.reporters.HDF5Reporter('trajectory.h5', 100))
simulation.reporters.append(
app.StateDataReporter(stdout, int(steps_total/10), step=True,
potentialEnergy=True, temperature=True, progress=True, remainingTime=False,
speed=True, totalSteps=steps_total, volume=True, separator='\t'))
print('Production...')
simulation.step(steps_production)
print('Done!')
###Output
Minimizing...
Equilibrating...
Production...
#"Progress (%)" "Step" "Potential Energy (kJ/mole)" "Temperature (K)" "Box Volume (nm^3)" "Speed (ns/day)"
30.0% 7500 11.0131477032 365.096087233 1000.0 0
40.0% 10000 4.8144297977 471.471481815 1000.0 2.73e+03
50.0% 12500 12.0211700662 206.068451567 1000.0 2.68e+03
60.0% 15000 17.2765251058 457.505593603 1000.0 2.62e+03
70.0% 17500 19.6206031144 303.497468184 1000.0 2.61e+03
80.0% 20000 10.5103588598 354.879128936 1000.0 2.57e+03
90.0% 22500 6.9912172528 555.040070009 1000.0 2.56e+03
100.0% 25000 4.45059069199 277.202268479 1000.0 2.56e+03
Done!
|
Final Project/test.ipynb | ###Markdown
Model 1
###Code
#Creation of model 1
# imports needed by the model and data-generator cells below
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator

model = Sequential()
# model.add(Conv2D(32, (3, 3), input_shape=(256, 256, 3)))
model.add(Conv2D(32, (3, 3), input_shape=(384, 512, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
# the generators below find 5 classes, so the output layer needs 5 softmax units
model.add(Dense(5))
model.add(Activation('softmax'))
# model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
img_height = 384
img_width = 512
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2) # set validation split
train_generator = train_datagen.flow_from_directory(
directory,
target_size=(img_height, img_width),
batch_size=batch_size,
# class_mode='binary',
class_mode = 'categorical',
subset='training') # set as training data
validation_generator = train_datagen.flow_from_directory(
directory, # same directory as training data
target_size=(img_height, img_width),
batch_size=batch_size,
# class_mode='binary',
class_mode = 'categorical',
subset='validation') # set as validation data
model.fit_generator(
train_generator,
steps_per_epoch = train_generator.samples // batch_size,
validation_data = validation_generator,
validation_steps = validation_generator.samples // batch_size,
epochs = 10
)
###Output
Found 1816 images belonging to 5 classes.
Found 454 images belonging to 5 classes.
###Markdown
Model 2
###Code
#Creation of model 2
model2 = Sequential()
# the generators below feed (384, 512, 3) RGB images, so the input shape must match
model2.add(Conv2D(32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(384, 512, 3)))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Conv2D(32, kernel_size=(5, 5), padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Flatten())
model2.add(Dense(64))
model2.add(Activation('relu'))
# 5 output classes with softmax to match the categorical loss
model2.add(Dense(5))
model2.add(Activation('softmax'))
# model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model2.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# #
# #
# #
# #CNN2
# # Define model
# CNN2 = Sequential()
# # CNN2.add(Dropout(0.5, input_shape=(28,28,1)))
# #CNN2.add(Dense(28, input_shape=(28,28,1), activation='sigmoid'))
# CNN2.add(Conv2D(32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(28,28,1)))
# CNN2.add(MaxPooling2D(pool_size=(2, 2)))
# CNN2.add(Conv2D(32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(12,12,20)))
# CNN2.add(MaxPooling2D(pool_size=(2, 2)))
# CNN2.add(Flatten())
# CNN2.add(Dense(100, activation='relu'))
# CNN2.add(Dense(10, activation='softmax'))
# CNN2.summary()
# # Compile and fit
# CNN2.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# hist = CNN2.fit(X_train, y_train, batch_size=10, epochs=60, validation_data= (X_val, y_val), verbose=2, shuffle=True)
# # Evaluate
# score = CNN2.evaluate(X_test, y_test, verbose=0)
# accuracy = 100*score[1]
# print('Test accuracy: %.4f%%' % accuracy)
train_datagen2 = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2) # set validation split
train_generator2 = train_datagen2.flow_from_directory(
directory,
target_size=(img_height, img_width),
batch_size=batch_size,
# class_mode='binary',
class_mode = 'categorical',
subset='training') # set as training data
validation_generator2 = train_datagen2.flow_from_directory(
directory, # same directory as training data
target_size=(img_height, img_width),
batch_size=batch_size,
# class_mode='binary',
class_mode = 'categorical',
subset='validation') # set as validation data
model2.fit_generator(
train_generator2,
steps_per_epoch = train_generator2.samples // batch_size,
validation_data = validation_generator2,
validation_steps = validation_generator2.samples // batch_size,
epochs = 10
)
###Output
_____no_output_____ |
Classify Handwritten Digits CNNs.ipynb | ###Markdown
Handwritten Digit Classifier Written by: Jean Pierre C. Aldama Date: 3/7/2020 6:40 AM [Quaxis Research/Innovation] Description: This notebook classifies handwritten digit images as a number 0 - 9 using Convolutional Neural Networks; it is the same as the ANN notebook, but with CNNs instead of ANNs. Requirements: pip install tensorflow keras numpy mnist matplotlib. We recommend installing the latest version of Anaconda, which can be downloaded at https://www.anaconda.com/distribution/download-section (we are using Anaconda for Python 3.x). Step 1: Import required libraries
###Code
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras.datasets import mnist
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Step 2: Load the data, split it into train and test datasets
###Code
(x_train, y_train), (x_test, y_test) = mnist.load_data()
###Output
_____no_output_____
###Markdown
Step 3: Get the image shape
###Code
print(x_train.shape)
print(x_test.shape)
###Output
(60000, 28, 28)
(10000, 28, 28)
###Markdown
Step 4: Peek at the first image @ index 0 in the training dataset
###Code
x_train[0]
###Output
_____no_output_____
###Markdown
Step 5: Print the image label
###Code
print(y_train[0])
plt.imshow(x_train[0])
###Output
5
###Markdown
Step 6: Reshape the data to fit the model
###Code
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
###Output
_____no_output_____
###Markdown
Step 7: One Hot Encoding, print the new label
###Code
y_train_one_hot = to_categorical(y_train)
y_test_one_hot = to_categorical(y_test)
print(y_train_one_hot[0])
###Output
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
###Markdown
Step 8: Build the CNN model
###Code
model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
###Output
_____no_output_____
###Markdown
Step 9: Compile the model
###Code
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Step 10: Train the model
###Code
hist = model.fit(x_train, y_train_one_hot, validation_data=(x_test, y_test_one_hot), epochs=3)
###Output
Train on 60000 samples, validate on 10000 samples
Epoch 1/3
60000/60000 [==============================] - 149s 2ms/step - loss: 0.2622 - accuracy: 0.9514 - val_loss: 0.0942 - val_accuracy: 0.9719
Epoch 2/3
60000/60000 [==============================] - 157s 3ms/step - loss: 0.0719 - accuracy: 0.9788 - val_loss: 0.0794 - val_accuracy: 0.9752
Epoch 3/3
60000/60000 [==============================] - 142s 2ms/step - loss: 0.0489 - accuracy: 0.9847 - val_loss: 0.1000 - val_accuracy: 0.9772
###Markdown
Step 11: Visualize the model's accuracy
###Code
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Convolutional Neural Network Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Step 12: Show predictions as probabilities for the first 5 images in the test data set
###Code
predictions = model.predict(x_test[:5])
###Output
_____no_output_____
###Markdown
Step 13: Print predictions as number labels for the first 5 images then print the actual labels
###Code
prediction_label = np.argmax(predictions, axis=1)
actual_label = y_test[:5]
print(prediction_label)
print(actual_label)
###Output
[7 2 1 0 4]
[7 2 1 0 4]
###Markdown
Step 14: Show the first 5 images as plot images
###Code
for i in range(0,5):
image = x_test[i]
image = np.array(image,dtype='float')
pixels = image.reshape((28,28))
plt.imshow(pixels, cmap='gray')
plt.show()
###Output
_____no_output_____ |
18CSE010-Assignment 3.ipynb | ###Markdown
Assignment 2: Basic Manipulation with Datasets
###Code
import pandas as pd
df=pd.read_csv("stud loan.csv")
df
df.shape
df.head()
df.tail()
#checking for missing values
df.isnull().sum()
df.columns  # printing the column names
df.info()  # the 'object' dtype represents strings
df.describe()  # describes the statistical summary (std = standard deviation)
#check for unique values in the education column
df.education.unique()
#check for duplicates
df.duplicated().sum()
###Output
_____no_output_____
###Markdown
NumPy manipulations
###Code
import numpy as np
#1D array
a=np.array([1,2,3])
print(a)
#2D array
a=np.array([(1,2,3),(4,5,6)])
print(a)
#finding the dimension of array
a = np.array([(1,2,3),(4,5,6)])
print(a.ndim)
#finding the itemsize
a = np.array([(1,2,3)])
print(a.itemsize)
#finding the particular datatype
a = np.array([(1,2,3)])
print(a.dtype)
#finding the size and shape of the array
a = np.array([(1,2,3,4,5,6)])
print(a.size)
print(a.shape)
#reshaping the dimensions
a = np.array([(8,9,10),(11,12,13)])
a=a.reshape(3,2)
print(a)
#finding the max, min and sum
a= np.array([1,2,3])
print(a.min())
print(a.max())
print(a.sum())
#subtraction, multiplication and division of two matrices
x= np.array([(1,2,3),(3,4,5)])
y= np.array([(1,2,3),(3,4,5)])
print(x-y)
print(x*y)
print(x/y)
###Output
[[0 0 0]
[0 0 0]]
[[ 1 4 9]
[ 9 16 25]]
[[1. 1. 1.]
[1. 1. 1.]]
|
geospatial-kenyan-universities.ipynb | ###Markdown
Function to produce an iframe of an interactive map.
###Code
import pandas as pd
import folium
from folium import Marker

def embed_map(m, file_name):
    from IPython.display import IFrame
    m.save(file_name)
    return IFrame(file_name, width='100%', height='500px')
###Output
_____no_output_____
###Markdown
Reading in the data.
###Code
universities = pd.read_csv("../input/d/eswillz/kenya-shapefiles/ken_un.csv")
universities.head()
###Output
_____no_output_____
###Markdown
Plotting the universities on an interactive map. Clicking on a marker shows the name of the university.
###Code
# Creating a base map
m_2 = folium.Map(location=[-1.18064, 36.93100], zoom_start=13)
# Adding a Marker for each university
for idx, row in universities.iterrows():
Marker([row['lat'], row['lon']], popup=row['name']).add_to(m_2)
# Show the map
embed_map(m_2, 'uni.html')
###Output
_____no_output_____ |
kaggle/tps-11-21-handling-noisy-labels-w-cleanlab.ipynb | ###Markdown
Learning with Mislabeled DataIn this notebook, we will explore the [cleanlab](https://github.com/cleanlab/cleanlab) library which provides functions for "finding, quantifying, and learning with label errors in datasets." In particular, we will do the following:1. Use `get_noise_indices` to detect mislabeled training labels2. Use the `LearningWithNoisyLabels` wrapper with various scikit-learn compatible models to make predictions despite the mislabeled data.**Note:** We use the leaked training labels to test some of our cleanlab functions, however we won't use it to train any models. -- Credits --This notebook was inspired by the following discussions/notebooks:* [This discussion](https://www.kaggle.com/c/tabular-playground-series-nov-2021/discussion/285503) about the mislabeled training data and the [accompanying notebook](https://www.kaggle.com/motloch/nov21-mislabeled-25).* [This notebook](https://www.kaggle.com/criskiev/game-over-or-eda-of-the-leaked-train-csv) where the [original training labels](https://www.kaggle.com/criskiev/november21) were posted. * [This notebook](https://www.kaggle.com/kalaikumarr/comparing-22-different-classification-models) which gets baselines for various models.* [This notebook](https://www.kaggle.com/kaaveland/tps-nov-2021-some-models-that-work-ok) which tests various sklearn classifiers. I used this notebook to pick models (and sometimes parameters) to test with the `LearningWithNoisyLabels` wrapper.* [This notebook](https://www.kaggle.com/sugamkhetrapal/tps-nov-2021-1-14-xgboost-linear) which uses XGBoost with linear models (rather than trees as usual).Please check these out (and upvote them!).
###Code
# Global variables for testing changes to this notebook quickly
RANDOM_SEED = 0
NUM_FOLDS = 8
# Install cleanlab
!pip install -q cleanlab
# Generic imports
import numpy as np
import pandas as pd
import time
import gc
# Hide warnings
import warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings('ignore')
# Plotting
import matplotlib.pyplot as plt
# cleanlab
import cleanlab
from cleanlab.pruning import get_noise_indices
from cleanlab.classification import LearningWithNoisyLabels
# Preprocessing
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
# Models & Evaluation
from sklearn.metrics import roc_auc_score, confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score, recall_score, precision_score
from sklearn.model_selection import StratifiedKFold, cross_val_predict
# Models
from sklearn.base import clone
from sklearn.utils.extmath import softmax
from sklearn.linear_model import LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.naive_bayes import MultinomialNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import LinearSVC
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
# Load data
original_train = pd.read_csv('../input/november21/train.csv')
train = pd.read_csv('../input/tabular-playground-series-nov-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-nov-2021/test.csv')
submission = pd.read_csv('../input/tabular-playground-series-nov-2021/sample_submission.csv')
# Feature columns
features = [col for col in train.columns if col not in ['id', 'target']]
# Check that the two train.csv are the same (except for the target)
print(train[features].equals(original_train[features]))
# Save space
y_actual = original_train['target'].copy()
del original_train
gc.collect()
###Output
True
###Markdown
1. Find Label ErrorsIn this section we use cleanlab functions to detect which labels are mislabeled. In particular, we do the following:1. Use logistic regression to estimate train label probabilities (from `predict_proba`)2. Use `get_noise_indices` to get the mislabeled examples3. Compare with the actual mislabeled examples from the leaked training data.
###Code
# fix labels, assumes input is pandas dataframe/series
def fix_labels(X_train, y_train, y_actual):
y_train = y_train.reset_index(drop = True)
y_actual = y_actual.reset_index(drop = True)
# Logistic regression
pipeline = make_pipeline(
StandardScaler(),
LogisticRegression(
solver = 'saga',
random_state = RANDOM_SEED
),
)
# Label probabilities
label_prob = cross_val_predict(
estimator = pipeline,
X = X_train,
y = y_train,
cv = StratifiedKFold(
n_splits = NUM_FOLDS,
shuffle = True,
random_state = RANDOM_SEED
),
n_jobs = -1,
method = "predict_proba",
)
# Estimate label errors
pred_errors = get_noise_indices(
s = y_train,
psx = label_prob,
sorted_index_method='normalized_margin',
)
# Actual label errors
actual_errors = y_actual.index[y_train != y_actual].to_numpy()
# Indicator vectors for label errors
y_true = y_actual.copy()
y_pred = y_train.copy()
y_pred.values[:] = 0
y_pred.iloc[pred_errors] = 1
y_true.values[:] = 0
y_true.iloc[actual_errors] = 1
# Add "fixed" target labels
fixed = y_train.copy()
fixed.iloc[pred_errors] = (y_train.iloc[pred_errors] + 1) % 2
return fixed, y_pred, y_true
%%time
pred_labels, pred_errors, true_errors = fix_labels(train[features], train['target'], y_actual)
# Analysis
print("Total Rows:", len(pred_labels))
print("Actual Errors:", true_errors.sum())
print("Estimated Errors:", pred_errors.sum())
print("\nAccuracy:", round(accuracy_score(true_errors, pred_errors), 3))
print("Precision:", round(precision_score(true_errors, pred_errors), 3))
print("Recall:", round(recall_score(true_errors, pred_errors), 3))
# Confusion matrix
cm = confusion_matrix(true_errors, pred_errors)
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.title("Confusion Matrix")
plt.xlabel("Predicted Errors")
plt.ylabel("Actual Errors")
plt.show()
###Output
Total Rows: 600000
Actual Errors: 150704
Estimated Errors: 149976
Accuracy: 0.979
Precision: 0.96
Recall: 0.956
###Markdown
2. Testing Models with Noisy DataIn this section, we use a cleanlab function to make predictions on the partially mislabeled data using various scikit-learn compatible models. We will do the following for each model:1. Get a baseline by training the vanilla model on the ~1/4 mislabeled training data2. Use `LearningWithNoisyLabels` to wrap the model and train on the same folds.We check each of the following models:* Logistic Regression* Ridge Regression* Linear Discriminant Analysis* SGDClassifier* Naive Bayes Classifier* XGBoost* Multi-layer Perceptron Classifier**Note (1):** The wrapper expects a scikit-learn compatible estimator with `.fit()`, `.predict()` and `.predict_proba()` methods. Not all of these estimators have `.predict_proba()` methods so we have to extend them by defining our own (using the decision function and softmax).**Note (2):** The wrapper function attempts to fix the mislabeled data using cross-validation so instead of training one model per fold, we are actually training 5 models per fold. Hence, we should expect significantly longer training times. Scoring FunctionsThe following functions accept a scikit-learn compatible model or pipeline with fit, predict and predict_proba methods and return AUC scores, out-of-fold predictions and test set predictions (averaged over each fold) for the vanilla models and the wrapped models, respectively.
###Code
# Scoring/Training Baseline Function
def train_model(sklearn_model):
# Store the holdout predictions
oof_preds = np.zeros((train.shape[0],))
test_preds = np.zeros((test.shape[0],))
scores = np.zeros(NUM_FOLDS)
times = np.zeros(NUM_FOLDS)
print('')
# Stratified k-fold cross-validation
skf = StratifiedKFold(n_splits = NUM_FOLDS, shuffle = True, random_state = RANDOM_SEED)
for fold, (train_idx, valid_idx) in enumerate(skf.split(train, train['target'])):
# Training and Validation Sets
X_train, y_train = train[features].iloc[train_idx].to_numpy(), train['target'].iloc[train_idx].to_numpy()
X_valid, y_valid = train[features].iloc[valid_idx].to_numpy(), train['target'].iloc[valid_idx].to_numpy()
X_test = test[features]
# Create model
model = clone(sklearn_model)
start = time.time()
model.fit(X_train, y_train)
end = time.time()
# validation and test predictions
valid_preds = model.predict_proba(X_valid)[:, 1]
test_preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS
oof_preds[valid_idx] = valid_preds
# fold auc score
fold_auc = roc_auc_score(y_valid, valid_preds)
end = time.time()
print(f'Fold {fold} (AUC): {round(fold_auc, 5)} in {round(end-start,2)}s.')
scores[fold] = fold_auc
times[fold] = end-start
time.sleep(0.5)
print("\nAverage AUC:", round(scores.mean(), 5))
print(f'Training Time: {round(times.sum(), 2)}s')
return scores, test_preds, oof_preds
# Scoring/Training function for LearningWithNoisyLabels
def train_noisy_model(sklearn_model):
# Store the holdout predictions
oof_preds = np.zeros((train.shape[0],))
test_preds = np.zeros((test.shape[0],))
scores = np.zeros(NUM_FOLDS)
times = np.zeros(NUM_FOLDS)
print('')
# Stratified k-fold cross-validation
skf = StratifiedKFold(n_splits = NUM_FOLDS, shuffle = True, random_state = RANDOM_SEED)
for fold, (train_idx, valid_idx) in enumerate(skf.split(train, train['target'])):
# Training and Validation Sets
X_train, y_train = train[features].iloc[train_idx].to_numpy(), train['target'].iloc[train_idx].to_numpy()
X_valid, y_valid = train[features].iloc[valid_idx].to_numpy(), train['target'].iloc[valid_idx].to_numpy()
X_test = test[features]
# Create model
model = LearningWithNoisyLabels(
clf = clone(sklearn_model)
)
start = time.time()
model.fit(X_train, y_train)
end = time.time()
# validation and test predictions
valid_preds = model.predict_proba(X_valid)[:, 1]
test_preds += model.predict_proba(X_test)[:, 1] / NUM_FOLDS
oof_preds[valid_idx] = valid_preds
# fold auc score
fold_auc = roc_auc_score(y_valid, valid_preds)
end = time.time()
print(f'Fold {fold} (AUC): {round(fold_auc, 5)} in {round(end-start,2)}s.')
scores[fold] = fold_auc
times[fold] = end-start
time.sleep(0.5)
print("\nAverage AUC:", round(scores.mean(), 5))
print(f'Training Time: {round(times.sum(), 2)}s')
return scores, test_preds, oof_preds
###Output
_____no_output_____
###Markdown
2.1 Logistic Regression
###Code
# Logistic Regression
logit_pipeline = make_pipeline(
StandardScaler(),
LogisticRegression(
solver = 'saga',
random_state = RANDOM_SEED,
n_jobs = -1,
),
)
# Logistic Regression Baseline
logit_scores, logit_preds, logit_oof = train_model(logit_pipeline)
submission['target'] = logit_preds
submission.to_csv('logit_submission.csv', index=False)
# Logistic Regression w/ Wrapper
noisy_logit_scores, noisy_logit_preds, noisy_logit_oof = train_noisy_model(logit_pipeline)
submission['target'] = noisy_logit_preds
submission.to_csv('noisy_logit_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.75059 in 134.5s.
Fold 1 (AUC): 0.74718 in 139.96s.
Fold 2 (AUC): 0.75141 in 139.6s.
Fold 3 (AUC): 0.74795 in 136.98s.
Fold 4 (AUC): 0.74919 in 138.38s.
Fold 5 (AUC): 0.74964 in 137.78s.
Fold 6 (AUC): 0.75106 in 139.06s.
Fold 7 (AUC): 0.74626 in 138.98s.
Average AUC: 0.74916
Training Time: 1105.25s
###Markdown
2.2 Ridge RegressionThe wrapper function expects an estimator with a `predict_proba` method, so we create an equivalent using softmax:
###Code
# Class extending Ridge Regression
class ExtendedRidgeClassifier(RidgeClassifier):
def predict_proba(self, X):
temp = self.decision_function(X)
return softmax(np.c_[-temp, temp])
# Ridge Regression
ridge_pipeline = make_pipeline(
StandardScaler(),
ExtendedRidgeClassifier(random_state = RANDOM_SEED),
)
# Ridge Regression Baseline
ridge_scores, ridge_preds, ridge_oof = train_model(ridge_pipeline)
submission['target'] = ridge_preds
submission.to_csv('ridge_submission.csv', index=False)
# Ridge Regression w/ Wrapper
noisy_ridge_scores, noisy_ridge_preds, noisy_ridge_oof = train_noisy_model(ridge_pipeline)
submission['target'] = noisy_ridge_preds
submission.to_csv('noisy_ridge_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.75022 in 19.81s.
Fold 1 (AUC): 0.74686 in 19.61s.
Fold 2 (AUC): 0.75132 in 19.73s.
Fold 3 (AUC): 0.74751 in 19.52s.
Fold 4 (AUC): 0.74875 in 19.84s.
Fold 5 (AUC): 0.74943 in 19.89s.
Fold 6 (AUC): 0.75062 in 19.42s.
Fold 7 (AUC): 0.74589 in 19.6s.
Average AUC: 0.74883
Training Time: 157.42s
###Markdown
2.3 Linear Discriminant Analysis
###Code
# Linear Discriminant Analysis
lda_pipeline = make_pipeline(
StandardScaler(),
LinearDiscriminantAnalysis(),
)
lda_scores, lda_preds, lda_oof = train_model(lda_pipeline)
submission['target'] = lda_preds
submission.to_csv('lda_submission.csv', index=False)
noisy_lda_scores, noisy_lda_preds, noisy_lda_oof = train_noisy_model(lda_pipeline)
submission['target'] = noisy_lda_preds
submission.to_csv('noisy_lda_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.75022 in 60.23s.
Fold 1 (AUC): 0.74686 in 60.96s.
Fold 2 (AUC): 0.75132 in 61.04s.
Fold 3 (AUC): 0.7475 in 60.34s.
Fold 4 (AUC): 0.74875 in 61.05s.
Fold 5 (AUC): 0.74942 in 61.16s.
Fold 6 (AUC): 0.75062 in 60.47s.
Fold 7 (AUC): 0.74589 in 61.33s.
Average AUC: 0.74883
Training Time: 486.59s
###Markdown
2.4 SGDClassifierWe use the parameters borrowed from [this notebook](https://www.kaggle.com/kaaveland/tps-nov-2021-some-models-that-work-ok). Again, since the wrapper function expects an estimator with a `predict_proba` method, we create an equivalent using softmax:
###Code
# Extended SGDClassifier
class ExtendedSGDClassifier(SGDClassifier):
def predict_proba(self, X):
temp = self.decision_function(X)
return softmax(np.c_[-temp, temp])
# SGDClassifier
sgd_pipeline = make_pipeline(
RobustScaler(),
ExtendedSGDClassifier(
loss='hinge',
learning_rate='adaptive',
penalty='l2',
alpha=1e-3,
eta0=0.025,
random_state = RANDOM_SEED
)
)
sgd_scores, sgd_preds, sgd_oof = train_model(sgd_pipeline)
submission['target'] = sgd_preds
submission.to_csv('sgd_submission.csv', index=False)
noisy_sgd_scores, noisy_sgd_preds, noisy_sgd_oof = train_noisy_model(sgd_pipeline)
submission['target'] = noisy_sgd_preds
submission.to_csv('noisy_sgd_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.75062 in 95.79s.
Fold 1 (AUC): 0.74721 in 100.82s.
Fold 2 (AUC): 0.75152 in 103.5s.
Fold 3 (AUC): 0.74797 in 96.53s.
Fold 4 (AUC): 0.7492 in 98.85s.
Fold 5 (AUC): 0.74962 in 98.77s.
Fold 6 (AUC): 0.75103 in 97.25s.
Fold 7 (AUC): 0.74629 in 95.59s.
Average AUC: 0.74918
Training Time: 787.1s
###Markdown
2.5 Naive Bayes Classifier
###Code
# Naive Bayes Classifier
nb_pipeline = make_pipeline(
MinMaxScaler(),
MultinomialNB(),
)
nb_scores, nb_preds, nb_oof = train_model(nb_pipeline)
submission['target'] = nb_preds
submission.to_csv('nb_submission.csv', index=False)
noisy_nb_scores, noisy_nb_preds, noisy_nb_oof = train_noisy_model(nb_pipeline)
submission['target'] = noisy_nb_preds
submission.to_csv('noisy_nb_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.7231 in 8.04s.
Fold 1 (AUC): 0.72032 in 7.91s.
Fold 2 (AUC): 0.72714 in 7.88s.
Fold 3 (AUC): 0.72086 in 7.86s.
Fold 4 (AUC): 0.72175 in 7.85s.
Fold 5 (AUC): 0.72389 in 7.85s.
Fold 6 (AUC): 0.72371 in 7.87s.
Fold 7 (AUC): 0.71999 in 7.95s.
Average AUC: 0.7226
Training Time: 63.22s
###Markdown
2.6 Multi-Layer Perceptron Classifier
###Code
# Multi-layer Perceptron Classifier
mlp_pipeline = make_pipeline(
StandardScaler(),
MLPClassifier(
hidden_layer_sizes=(128, 64),
batch_size = 256,
early_stopping = True,
validation_fraction = 0.2,
n_iter_no_change = 5,
random_state = RANDOM_SEED
),
)
mlp_scores, mlp_preds, mlp_oof = train_model(mlp_pipeline)
submission['target'] = mlp_preds
submission.to_csv('mlp_submission.csv', index=False)
noisy_mlp_scores, noisy_mlp_preds, noisy_mlp_oof = train_noisy_model(mlp_pipeline)
submission['target'] = noisy_mlp_preds
submission.to_csv('noisy_mlp_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.74979 in 405.98s.
Fold 1 (AUC): 0.74689 in 366.66s.
Fold 2 (AUC): 0.75084 in 384.15s.
Fold 3 (AUC): 0.7488 in 388.0s.
Fold 4 (AUC): 0.75032 in 407.19s.
Fold 5 (AUC): 0.74919 in 407.3s.
Fold 6 (AUC): 0.75024 in 404.54s.
Fold 7 (AUC): 0.74585 in 400.8s.
Average AUC: 0.74899
Training Time: 3164.61s
###Markdown
2.7 XGBoost with Linear Models
###Code
# XGBoost Classifier
xgb_pipeline = make_pipeline(
StandardScaler(),
XGBClassifier(
booster = 'gblinear',
eval_metric = 'auc',
random_state = RANDOM_SEED
),
)
xgb_scores, xgb_preds, xgb_oof = train_model(xgb_pipeline)
submission['target'] = xgb_preds
submission.to_csv('xgb_submission.csv', index=False)
noisy_xgb_scores, noisy_xgb_preds, noisy_xgb_oof = train_noisy_model(xgb_pipeline)
submission['target'] = noisy_xgb_preds
submission.to_csv('noisy_xgb_submission.csv', index=False)
###Output
Fold 0 (AUC): 0.75049 in 96.02s.
Fold 1 (AUC): 0.74698 in 96.13s.
Fold 2 (AUC): 0.75136 in 95.56s.
Fold 3 (AUC): 0.74795 in 95.08s.
Fold 4 (AUC): 0.7491 in 98.16s.
Fold 5 (AUC): 0.74946 in 94.93s.
Fold 6 (AUC): 0.75109 in 95.03s.
Fold 7 (AUC): 0.74612 in 95.84s.
Average AUC: 0.74907
Training Time: 766.75s
|
exercise4/convolutional_neural_nets.ipynb | ###Markdown
Convolutional Neural NetworksIn this exercise you will be introduced to some practical aspects of deep learning incomputer vision, including constructing a deep neural network and training it via gradientdescent to tackle image classification.We will use the popular TensorFlow framework through the Keras API.We will tackle **image classification** through deep learning methods, in particular we will look at* Dataset download and normalization* Softmax regression with stochastic gradient descent and Adam* Multilayer perceptrons with tanh and ReLU* A basic convolutional net* BatchNorm, striding, global average pooling* Residual networks* Learning rate decay and data augmentation Install TensorFlowInstall TensorFlow using `pip install tensorflow`. TensorFlow can use GPUs to make the training several times faster, but since not all of you may have access to a GPU, we have tried to scale this exercise with a CPU in mind. For more info on installing TensorFlow, see https://www.tensorflow.org/install TensorBoard PlottingTensorBoard is a web-based tool for drawing pretty plots of quantities we care about during training, such as the loss. We need to choose a folder where these values will be stored ("logdir").Start the TensorBoard server by executing e.g. `tensorboard --logdir /tmp/tensorboard_logs` after you've activated your conda environment. If you change the logdir, also adjust it in the cell below.You can view the graphs by visiting http://localhost:6006 in your browser (6006 is the default port).At first there will be nothing to plot, so it will be empty.
###Code
log_root= '/tmp/tensorboard_logs'
%%html
<!-- Run this cell to add heading letters per subtask (like a, b, c) -->
<style>
body {counter-reset: section;}
h2:before {counter-increment: section;
content: counter(section, lower-alpha) ") ";}
</style>
%matplotlib notebook
import os
import datetime
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import imageio
import cv2
from attrdict import AttrDict
import tensorflow.keras.models as models
import tensorflow.keras.layers as layers
import tensorflow.keras.regularizers as regularizers
import tensorflow.keras.optimizers as optimizers
import tensorflow.keras.callbacks as callbacks
import tensorflow.keras.initializers as initializers
import tensorflow.keras.preprocessing.image as kerasimage
# Just an image plotting function
def plot_multiple(images, titles=None, colormap='gray',
max_columns=np.inf, imwidth=4, imheight=4, share_axes=False):
"""Plot multiple images as subplots on a grid."""
if titles is None:
titles = [''] *len(images)
assert len(images) == len(titles)
n_images = len(images)
n_cols = min(max_columns, n_images)
n_rows = int(np.ceil(n_images / n_cols))
fig, axes = plt.subplots(
n_rows, n_cols, figsize=(n_cols * imwidth, n_rows * imheight),
squeeze=False, sharex=share_axes, sharey=share_axes)
axes = axes.flat
# Hide subplots without content
for ax in axes[n_images:]:
ax.axis('off')
if not isinstance(colormap, (list,tuple)):
colormaps = [colormap]*n_images
else:
colormaps = colormap
for ax, image, title, cmap in zip(axes, images, titles, colormaps):
ax.imshow(image, cmap=cmap)
ax.set_title(title)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
Dataset PreparationWe are going to tackle the classic image classification task using the **CIFAR-10 dataset**, containing 60,000 32x32 RGB images of 10 different classes (50,000 for training and 10,000 for testing). ![image.png](cifar.png)The dataset is automatically downloaded if you run the next cell.You may read more about the dataset at https://www.cs.toronto.edu/~kriz/cifar.html.A common normalization strategy is to map the image RGB values to the range 0-1 and to subtract the mean training pixel value. Perform this normalization below.
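If you are unsure where to start, here is a minimal sketch of the normalization (variable names chosen to match the cells that follow):

```python
# scale to the 0-1 range and subtract the mean over all training pixels
x_train = im_train.astype('float32') / 255.0
x_test = im_test.astype('float32') / 255.0
mean_training_pixel = x_train.mean()
x_train -= mean_training_pixel
x_test -= mean_training_pixel
```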
###Code
(im_train, y_train), (im_test, y_test) = tf.keras.datasets.cifar10.load_data()
# Normalize to 0-1 range and subtract mean of training pixels
# YOUR CODE HERE
raise NotImplementedError()
image_shape = x_train[0].shape
labels = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
###Output
_____no_output_____
###Markdown
Softmax RegressionBefore considering convolutional neural networks, let us start with a simpler classifier called softmax regression (a.k.a. multinomial logistic regression). Note that even though the name contains "regression", this is a classification model.Softmax regression can be understood as a single-layer neural network. We first flatten our input image to a long vector $\mathbf{x}$, consisting of $32\cdot 32\cdot 3= 3072$ values. Then we predict class probabilities $\hat{\mathbf{y}}$ through a fully-connected layer with softmax activation:$$\mathbf{z} = W \mathbf{x} + \mathbf{b} \\\hat{y}_c = \operatorname{softmax}(\mathbf{z})_c = \frac{\exp{z_c}}{\sum_{\tilde{c}=1}^{10} \exp{z_{\tilde{c}}}}$$Here $z_c$ denotes the $c$th component of the vector $\mathbf{z}$, called the vector of **logits**.The weights $W$ and biases $\mathbf{b}$ will be learned during training. TrainingWe train the model by minimizing a **loss function** averaged over the training data. As we are tackling a classification problem, the **cross-entropy** is a suitable loss function:$$\mathcal{L}_{CE}(\mathbf{y}, \hat{\mathbf{y}}; W, \mathbf{b}) = - \sum_{c=1}^{10} y_c \log{\hat{y}_c}$$Note that in the above notation the ground-truth $\mathbf{y}$ is a so-called **one-hot vector**, containing a single 1 component, while the remaining components are zeros. The model's predicted $\hat{\mathbf{y}}$ is a vector which also sums to one, but whose components all take continuous values in the range $(0, 1)$. What is the intuition behind this loss function?We minimize the loss by **stochastic gradient descent** (SGD). That is, we repeatedly sample mini-batches from the training data and update the parameters (weights and biases) towards the direction of the steepest decrease of the loss averaged over the mini-batch. For example, the weight $w_{ij}$ (an element of the matrix $W$) is updated according to:$$w_{ij}^{(t+1)} = w_{ij}^{(t)} - \eta \cdot \frac{\partial \mathcal{L}_{CE}} {\partial w_{ij}},$$with $\eta$ being the learning rate.----This is all very straightforward to perform in Keras. `models.Sequential` accepts a list of layers that are applied sequentially, in a chain. Here we have two layers, `Flatten` to convert the image into a long vector and `Dense`, which is a synonym for fully-connected layer.
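To make the loss concrete, here is a tiny NumPy sketch of softmax and cross-entropy for a single example, using made-up logits for three classes:

```python
import numpy as np

z = np.array([2.0, 0.5, -1.0])        # logits (illustrative values)
y = np.array([1, 0, 0])               # one-hot ground truth
y_hat = np.exp(z) / np.exp(z).sum()   # softmax probabilities
loss = -np.sum(y * np.log(y_hat))     # cross-entropy
print(y_hat, loss)
```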
###Code
softmax_regression = models.Sequential([
layers.Flatten(input_shape=image_shape),
layers.Dense(10, activation='softmax')],
name='linear')
def train_model(model, batch_size=128, n_epochs=30, optimizer=optimizers.SGD, learning_rate=1e-2):
opt = optimizer(lr=learning_rate)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
logdir = os.path.join(log_root, f'{model.name}_{timestamp}')
tensorboard_callback = callbacks.TensorBoard(logdir, histogram_freq=1)
model.fit(x=x_train, y=y_train, verbose=1, epochs=n_epochs,
validation_data=(x_test, y_test), batch_size=batch_size,
callbacks=[tensorboard_callback])
train_model(softmax_regression, optimizer=optimizers.SGD, learning_rate=1e-2)
###Output
_____no_output_____
###Markdown
(Jupyter Notebook Tip: After you're done training, you can collapse or hide the output by clicking or double clicking the area directly to the left of the output.)You can check how the loss and accuracy change over the course of training in TensorBoard.What would be the cross-entropy loss for a dummy classifier that always outputs equal probabilities for all the classes? Adam OptimizerThere has been a lot of research on improving on the simple stochastic gradient descent algorithm we used above. One of the most popular variants is called **Adam** (https://arxiv.org/abs/1412.6980, "adaptive moment estimation"). Its learning rate usually requires less precise tuning, and something in the range of $(10^{-4},10^{-3})$ often works well in practice. Intuitively, this is because the algorithm automatically adapts the learning rate for each weight depending on the gradients.You can run it as follows (the optimizer is passed to Keras's `model.compile` function in `train_model`). The difference is not large for such a simple model, but makes a bigger difference for larger networks.
###Code
softmax_regression = models.Sequential([
layers.Flatten(input_shape=image_shape),
layers.Dense(10, activation='softmax')],
name='linear')
train_model(softmax_regression, optimizer=optimizers.Adam, n_epochs=30, learning_rate=2e-4)
###Output
_____no_output_____
###Markdown
Interpreting the Learned WeightsMultiplication by the weights $W$ can be interpreted as computing responses to correlation templates per image class.That means we can reshape the weight array $W$ to obtain "template images".Perform this reshaping and visualize the resulting templates.Do they look as you would expect?
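As a hint, one possible way to build the `templates` array, assuming the default row-major flattening of the (32, 32, 3) input:

```python
# W has shape [3072, 10]; undo the flattening and put the class axis first
templates = W.T.reshape(10, 32, 32, 3)
```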
###Code
W, b = softmax_regression.layers[1].get_weights()
# Create the `templates` variable here based on W, with dimensions [10 (num_classes), height, width, 3 (rgb)]
# YOUR CODE HERE
raise NotImplementedError()
# We normalize the templates to the 0-1 range for visualization
mini = np.min(templates, axis=(1,2,3), keepdims=True)
maxi = np.max(templates, axis=(1,2,3), keepdims=True)
rescaled_templates = (templates - mini)/ (maxi-mini)
plot_multiple(rescaled_templates, labels, max_columns=5, imwidth=1, imheight=1)
###Output
_____no_output_____
###Markdown
Multi-Layer PerceptronSoftmax regression has a big limitation: the decision surface between any two classes (i.e. the part of the input space where the classification decision changes from one class to another) is a simple hyperplane ("flat").The **multi-layer perceptron** (MLP) is a neural network model with additional layer(s) between the input and the logits (so-called hidden layers), with nonlinear activation functions. Why are activation functions needed?Before the current generation of neural networks, the **hyperbolic tangent** (tanh) function used to be the preferred activation function in the hidden layers of MLPs. It is sigmoid shaped and has a range of $(-1,1)$. We can create such a network in Keras as follows. Does it obtain better results than the linear model?
###Code
tanh_mlp = models.Sequential([
layers.Flatten(input_shape=image_shape),
layers.Dense(512, activation='tanh'),
layers.Dense(10, activation='softmax')],
name='tanh_mlp')
train_model(tanh_mlp, optimizer=optimizers.Adam, learning_rate=2e-4)
###Output
_____no_output_____
###Markdown
ReLUThe ReLU activation function has become more popular in recent years, especially for deeper nets. Create and train an MLP that uses ReLU as the activation. Do the results change? What benefits does ReLU have against tanh? YOUR ANSWER HERE
###Code
relu_mlp = models.Sequential([
layers.Flatten(input_shape=image_shape),
layers.Dense(512, activation='relu', kernel_initializer='he_uniform'),
layers.Dense(10, activation='softmax')],
name='relu_mlp')
train_model(relu_mlp, optimizer=optimizers.Adam, learning_rate=2e-4)
###Output
_____no_output_____
###Markdown
A Simple Convolutional Neural NetworkThe previous models did not explicitly make use of the grid structure of the image pixels. Convolutional neural networks do.Instead of reshaping the input image pixels into one long vector, convolutional layers slide small filters across the input, just as with the convolutional filters we saw earlier in the course. In the earlier parts, we looked at convolution on an image with a single channel in case of grayscale images, or channelwise separate convolutions on RGB images.In CNNs, the multiple input channels of a conv layer are not handled independently, but are linearly combined. This means that the weight array has shape `[kernel_height, kernel_width, num_input_channels, num_output_channels]` and we perform a weighted sum along the input channel axis. Another difference is the use of a **bias** vector of shape `[num_output_channels]`, each component of which gets added on the corresponding output channel.As you already know, convolution is a linear operator, so it is possible to express any convolutional layer as a fully-connected layer.However, the convolutional layer's weight matrix is sparse (has many zeros) compared to a fully-connected ("dense") layer because each output only depends on a small number of inputs, namely, those within a small neigborhood. Further, the weight values are shared between the different pixel locations.This tutorial has some great visualisations and explanations on the details of conv layers: https://arxiv.org/abs/1603.07285.**Q:** Assuming a fixed input image size, do you think the reverse of the above also holds? Can any fully-connected layer be expressed as a convolutional layer? YOUR ANSWER HERE **Q:** Technically, what's called a "convolutional" layer is usually implemented as a *cross-correlation* computation. Could there be any advantage in using the actual definition of convolution in these layers? YOUR ANSWER HERE Train the following simple CNN model. It may take about 15 minutes on a CPU.
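As a small illustration of the channel mixing at a single output location (not the full sliding-window computation), the weighted sum over the 3×3 neighborhood and the input channels can be written with `einsum`:

```python
import numpy as np

patch = np.random.rand(3, 3, 64)        # one input neighborhood: [kh, kw, C_in]
weights = np.random.rand(3, 3, 64, 32)  # conv weights: [kh, kw, C_in, C_out]
biases = np.random.rand(32)             # one bias per output channel

# sum over kernel height, kernel width and input channels -> one value per output channel
out = np.einsum('ijc,ijco->o', patch, weights) + biases
print(out.shape)  # (32,)
```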
###Code
cnn = models.Sequential([
layers.Conv2D(filters=64, kernel_size=3, activation='relu',
kernel_initializer='he_uniform', padding='same',
input_shape=image_shape),
layers.MaxPooling2D(pool_size=2, strides=2),
layers.Conv2D(filters=64, kernel_size=3, activation='relu',
kernel_initializer='he_uniform', padding='same'),
layers.MaxPooling2D(pool_size=2, strides=2),
layers.Flatten(),
layers.Dense(10, activation='softmax')],
name='cnn')
train_model(cnn, optimizer=optimizers.Adam, learning_rate=1e-3, n_epochs=15)
###Output
_____no_output_____
###Markdown
**Q:** Does it improve the result? Does it run faster than the MLP? How many parameters does this model have?**Q:** How large is the output space of the first convolutional layer (i.e. how many numbers are output by that layer per image)? How does this compare to the size of the hidden layer in the MLP? YOUR ANSWER HERE Implementing the Forward PassTo confirm your understanding of the main CNN components, implement the forward pass of the convolutional, max pooling and dense layers, plus the relu and softmax activation functions. For simplicity, assume a fixed filter size of 3x3 for the convolution, with stride 1 and use zero padding, such that the spatial size does not change (called 'same' padding). Implement this in `conv3x3_same`. For max pooling assume a fixed 2x2 pooling size and stride 2 in `maxpool2x2`.To check whether your implementation is correct, we can extract the weights from the Keras model we trained above, and feed these weights and an test input to your implementation of the forward pass. If your result disagrees with Keras, there is probably a bug somewhere!You can also generalize these to other filter sizes and strides as well.(Implementation of the backward pass does not fit within this exercise, but the "Machine Learning" course of our chair does include such exercises.)
###Code
def conv3x3_same(x, weights, biases):
"""Convolutional layer with filter size 3x3 and 'same' padding.
`x` is a NumPy array of shape [height, width, n_features_in]
`weights` has shape [3, 3, n_features_in, n_features_out]
`biases` has shape [n_features_out]
Return the output of the 3x3 conv (without activation)
"""
# YOUR CODE HERE
raise NotImplementedError()
return result
def maxpool2x2(x):
"""Max pooling with pool size 2x2 and stride 2.
`x` is a numpy array of shape [height, width, n_features]
"""
# YOUR CODE HERE
raise NotImplementedError()
return result
def dense(x, weights, biases):
# YOUR CODE HERE
raise NotImplementedError()
def relu(x):
# YOUR CODE HERE
raise NotImplementedError()
def softmax(x):
# YOUR CODE HERE
raise NotImplementedError()
def my_predict_cnn(x, W1, b1, W2, b2, W3, b3):
x = conv3x3_same(x, W1, b1)
x = relu(x)
x = maxpool2x2(x)
x = conv3x3_same(x, W2, b2)
x = relu(x)
x = maxpool2x2(x)
x = x.reshape(-1)
x = dense(x, W3, b3)
x = softmax(x)
return x
W1, b1 = cnn.layers[0].get_weights()
W2, b2 = cnn.layers[2].get_weights()
W3, b3 = cnn.layers[5].get_weights()
i_test = 12
inp = x_test[i_test]
my_prob = my_predict_cnn(inp, W1, b1, W2, b2, W3, b3)
keras_prob = cnn.predict(inp[np.newaxis])[0]
if np.mean((my_prob-keras_prob)**2) > 1e-10:
print('Something isn\'t right! Keras gives different results than my_predict_cnn!')
else:
print('Congratulations, you got correct results!')
i_maxpred = np.argmax(my_prob)
plot_multiple([im_test[i_test]], [f'Pred: {labels[i_maxpred]}, {my_prob[i_maxpred]:.1%}'], imheight=2)
###Output
_____no_output_____
###Markdown
Batch NormalizationBatch normalization is a modern technique to improve and speed up the training of deep neural networks (BatchNorm, Ioffe & Szegedy ICML'15, https://arxiv.org/abs/1502.03167). Each feature channel is normalized to have zero mean and unit variance across the spatial and mini-batch axes. To compensate for the lost degrees of freedom, extra scaling and bias parameters are introduced and learned. Mathematically, BatchNorm for a spatial feature map (e.g. the output of conv) can be written as:$$\mu_d = \mathbb{E}\{x_{\cdot \cdot d}\}, \\\sigma_d = \sqrt{\operatorname{Var}\{x_{\cdot \cdot d}\}} \\z_{ijd} = \gamma_d \cdot \frac{x_{ijd} - \mu_d}{\sigma_d} + \beta_d,\\$$with the expectation and variance taken across both the data samples of the batch and the spatial dimensions.The $\mu_d$ and $\sigma_d$ values are computed on the actual mini-batch during training, but at test-time they are fixed, so that the prediction of the final system on a given sample does not depend on other samples in the mini-batch. To obtain the fixed values for test-time use, one needs to maintain moving statistics over the activations during training. This can be a bit tricky to implement from scratch, but luckily this is now implemented in all popular frameworks, including TensorFlow and Keras.**Q:** When applying BatchNorm, it is not necessary to use biases in the previous convolutional layer. Can you explain why? Use the "use_bias" argument of `layers.Conv2D` accordingly.**Q:** Furthermore, if the BatchNorm is followed by a linear or conv layer (with perhaps a ReLU in between), it is not necessary to use the $\gamma_d$ factor in BatchNorm (it can be turned off as `layers.BatchNormalization(scale=False)`). Why? YOUR ANSWER HERE Create a modified version of the previous model, where the `Conv2D` layers don't include the activation any more, and instead, insert a `layers.BatchNormalization()` and a `layers.Activation('relu')` layer after each conv. Does this model obtain better results?
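A tiny NumPy sketch of the normalization itself (training-time statistics only, ignoring the moving averages and the small epsilon that real implementations add to the denominator):

```python
import numpy as np

x = np.random.rand(128, 16, 16, 64)           # [batch, height, width, channels]
mu = x.mean(axis=(0, 1, 2), keepdims=True)    # per-channel mean
sigma = x.std(axis=(0, 1, 2), keepdims=True)  # per-channel standard deviation
gamma, beta = 1.0, 0.0                        # learned scale and shift parameters
z = gamma * (x - mu) / sigma + beta
```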
###Code
cnn_batchnorm = models.Sequential([
# ....
])
# YOUR CODE HERE
raise NotImplementedError()
train_model(cnn_batchnorm, optimizer=optimizers.Adam, learning_rate=1e-3, n_epochs=15)
###Output
_____no_output_____
###Markdown
Strided ConvolutionsMax-pooling is a popular technique for reducing the spatial dimensionality of the outputs from conv layers. Another way to reduce dimensionality is striding. For an argument why this may be similarly effective, see [Springenberg et al., ICLRW'15](https://arxiv.org/pdf/1412.6806.pdf).Now create a model using the same architecture as before, with the difference of removing the max-pooling layers and increasing the stride parameter of the conv layers to $2 \times 2$ in the spatial dimensions. What differences do you notice when training this new network?What is a clear advantage of using strides and why?
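One such strided layer could look roughly like this (a sketch reusing the `layers` alias imported above and the Conv-BatchNorm-ReLU pattern from the previous model):

```python
layers.Conv2D(filters=64, kernel_size=3, strides=2, use_bias=False,
              kernel_initializer='he_uniform', padding='same')
```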
###Code
cnn_strides = models.Sequential([
# ....
])
# YOUR CODE HERE
raise NotImplementedError()
train_model(cnn_strides, optimizer=optimizers.Adam, learning_rate=1e-3, n_epochs=15)
###Output
_____no_output_____
###Markdown
Global PoolingThe above network ends in a `Flatten` layer followed by a `Dense` layer, in which the number of weights depends on the input size. This means that testing can only be performed on the exact same image size. Several architectures employ a (spatial) **global average pooling layer** to produce a vector of fixed size describing the whole image, instead of flattening.For this to work well, the units before the average pooling need to have a large enough receptive field. Therefore, compared with the previous model, remove the `Flatten` layer and instead add a third Conv-BatchNorm-ReLU combination, followed by a `layers.GlobalAveragePooling2D()` layer (before the final `Dense` layer).**Q:** Which network has more parameters, this or the previous one?**Q:** What is the size of the receptive field of the units in the layer directly before the global average pooling? (Remember: the receptive field of a particular unit (neuron) is the area of the *input image* that can influence the activation of this given unit).Train it and see if it reaches similar accuracy to the previous one.
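The tail of the `models.Sequential([...])` list then ends roughly like this instead of the `Flatten` layer (a sketch):

```python
# ... third Conv2D + BatchNormalization + Activation('relu') block here ...
layers.GlobalAveragePooling2D(),
layers.Dense(10, activation='softmax')
```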
###Code
cnn_global_pool = models.Sequential([
# ....
])
# YOUR CODE HERE
raise NotImplementedError()
train_model(cnn_global_pool, optimizer=optimizers.Adam, learning_rate=1e-3)
###Output
_____no_output_____
###Markdown
Residual NetworksResNet is a more modern architecture, introduced by He et al. in 2015 (published in 2016: https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf) and is still popular today.It consists of blocks like the following:![ResNet Block](resnet_block.png)Each of these so-called *residual blocks* only has to predict a *residual* (in plain words: the "rest", the "leftover") that will be added on top of its input.In other words, the block outputs how much each feature needs to be changed in order to enhance the representation compared to the previous block.There are several ways to combine residual blocks into *residual networks* (ResNets). In the following, we consider ResNet-v1, as used for the CIFAR-10 benchmark in the original ResNet paper (it is simpler compared to the full model that they used for the much larger ImageNet benchmark).Section 4.2. of the paper describes this architecture as follows: "*The first layer is 3×3 convolutions. Then we use a stack of 6n layers with 3×3 convolutions on the feature maps of sizes {32, 16, 8} respectively, with 2n layers for each feature map size. The numbers of filters are {16, 32, 64} respectively. The subsampling is performed by convolutions with a stride of 2. The network ends with a global average pooling, a 10-way fully-connected layer, and softmax. [...] When shortcut connections are used, they are connected to the pairs of 3×3 layers (totally 3n shortcuts). On this dataset we use identity shortcuts in all cases.*"Further, they use L2 regularization (a standard tool to combat overfitting). This penalizes weights with large magnitude by adding an additional term to the cost function, besides the cross-entropy. The overall function to optimize becomes:$$\mathcal{L}_{CE} + \frac{\lambda}{2} \sum_{w\in\text{weights}} w^2,$$and in this paper $\lambda=10^{-4}$.In the previous parts of this exercise we have already seen every major component we need to build this thing. However, ResNet is not a pure sequential architecture due to the skip connections. This means we cannot use `models.Sequential`. Luckily, Keras also offers a functional API. Look below to understand how this API works and fill in the missing pieces to make a ResNet.
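The part you need to fill in follows the stage structure described above; here is a sketch of the block-stacking loops using the `resnet_block` helper from the next cell, with `n` blocks per stage and the first block of the second and third stage using `strides=2` to halve the resolution:

```python
for n_channels, first_stride in [(16, 1), (32, 2), (64, 2)]:
    x = resnet_block(x, n_channels, strides=first_stride)
    for _ in range(n - 1):
        x = resnet_block(x, n_channels)
```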
###Code
def resnet(num_layers=56):
if (num_layers - 2) % 6 != 0:
raise ValueError('n_layers should be 6n+2 (eg 20, 32, 44, 56)')
n = (num_layers - 2) // 6
inputs = layers.Input(shape=image_shape)
# First layer
x = layers.Conv2D(16, 3, use_bias=False,
kernel_regularizer=regularizers.l2(1e-4),
padding='same', kernel_initializer='he_normal')(inputs)
x = layers.BatchNormalization(scale=False)(x)
x = layers.Activation('relu')(x)
# Call the `resnet_block` function in loops to stack ResNet blocks according to the instructions above.
# YOUR CODE HERE
raise NotImplementedError()
# Global pooling and classifier on top
x = layers.GlobalAveragePooling2D()(x)
outputs = layers.Dense(10, activation='softmax',
kernel_regularizer=regularizers.l2(1e-4))(x)
return models.Model(inputs=inputs, outputs=outputs, name=f'resnet{num_layers}')
def resnet_block(x, n_channels_out, strides=1):
# First conv
f = layers.Conv2D(n_channels_out, 3, strides, use_bias=False,
kernel_regularizer=regularizers.l2(1e-4),
padding='same', kernel_initializer='he_normal')(x)
f = layers.BatchNormalization(scale=False)(f)
f = layers.Activation('relu')(f)
# Second conv
# YOUR CODE HERE
raise NotImplementedError()
# The shortcut connection is just the identity.
# If feature channel counts differ between input and output,
# zero padding is used to match the depths.
# This is implemented by a Conv2D with fixed weights.
n_channels_in = x.shape[-1]
if n_channels_in != n_channels_out:
# Fixed weights, np.eye returns a matrix with 1s along the
# main diagonal and zeros elsewhere.
identity_weights = np.eye(n_channels_in, n_channels_out, dtype=np.float32)
layer = layers.Conv2D(
n_channels_out, kernel_size=1, strides=strides, use_bias=False,
kernel_initializer=initializers.Constant(value=identity_weights))
# Not learned! Set trainable to False:
layer.trainable = False
x = layer(x)
# This is where the ResNet magic happens: the shortcut connection is
# added to the residual.
x = layers.add([x, f])
return layers.Activation('relu')(x)
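# Hedged sketch (added, not the official exercise solution): one way the loop in
# `resnet()` above could stack the blocks, following the paper's description of
# n blocks per stage with {16, 32, 64} filters and stride-2 subsampling between
# stages. The helper name is illustrative only.
def _stack_residual_blocks_sketch(x, n):
    for stage, n_channels in enumerate([16, 32, 64]):
        for block in range(n):
            # the first block of stages 2 and 3 halves the feature map size
            strides = 2 if (stage > 0 and block == 0) else 1
            x = resnet_block(x, n_channels, strides=strides)
    return x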
###Output
_____no_output_____
###Markdown
Learning Rate Decay and Data Augmentation - Our Final ModelLearning rate decay reduces the learning rate as the training progresses. It can be implemented as a Keras callback as shown below.If you have a good GPU or a lot of time, train ResNet-56 on the CIFAR-10 dataset for about 150 epochs. As a rough idea, it will take about 1-2 hours with a good GPU, but on a CPU it could take a day or two. If that's too long, train a smaller ResNet, with `num_layers`=14 or 20, or do fewer epochs.To add data augmentation (e.g. random translation or rotation of the input images), look up the documentation for the `ImageDataGenerator` class. The ResNet model presented in the original paper was trained with random translations of $\pm$ 4 px. Does the augmentation improve the final performance? What do you observe on the training and validation curves compared to no augmentation?
###Code
def learning_rate_schedule(epoch):
"""Learning rate is scheduled to be reduced after 80 and 120 epochs.
    This function is automatically called every epoch as part of callbacks
during training.
"""
if epoch < 80:
return 1e-3
if epoch < 120:
return 1e-4
return 1e-5
def train_with_lr_decay(model):
model.compile(
loss='sparse_categorical_crossentropy', metrics=['accuracy'],
optimizer=optimizers.Adam(lr=1e-3))
# Callback for learning rate adjustment (see below)
lr_scheduler = callbacks.LearningRateScheduler(learning_rate_schedule)
# TensorBoard callback
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
logdir = os.path.join(log_root, f'{model.name}_{timestamp}')
tensorboard_callback = callbacks.TensorBoard(logdir, histogram_freq=1)
# Fit the model on the batches generated by datagen.flow()
model.fit(
x_train, y_train, batch_size=128,
validation_data=(x_test, y_test), epochs=150, verbose=1,
callbacks=[lr_scheduler, tensorboard_callback])
def train_with_lr_decay_and_augmentation(model):
model.compile(
loss='categorical_crossentropy', metrics=['accuracy'],
optimizer=optimizers.Adam(lr=1e-3))
# Callback for learning rate adjustment (see below)
lr_scheduler = callbacks.LearningRateScheduler(learning_rate_schedule)
# TensorBoard callback
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
logdir = os.path.join(log_root, f'{model.name}_augmented_{timestamp}')
tensorboard_callback = callbacks.TensorBoard(logdir, histogram_freq=1)
# Data augmentation: flip and shift horizontally/vertically by max 4 pixels
# datagen = kerasimage.ImageDataGenerator(...)
# YOUR CODE HERE
raise NotImplementedError()
# Note: model.fit with generator input seems to only work when the
# y targets are provided as one-hot vectors
y_train_onehot = tf.keras.utils.to_categorical(y_train, 10)
y_test_onehot = tf.keras.utils.to_categorical(y_test, 10)
# Fit the model on the batches generated by datagen.flow() using model.fit()
# YOUR CODE HERE
raise NotImplementedError()
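# Hedged sketch (added): one possible way to fill in the augmentation and fit
# steps above, using horizontal flips and +/-4 px shifts as in the paper. The
# exact generator settings are an assumption, not the official solution.
def train_with_lr_decay_and_augmentation_sketch(model):
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    model.compile(
        loss='categorical_crossentropy', metrics=['accuracy'],
        optimizer=optimizers.Adam(lr=1e-3))
    lr_scheduler = callbacks.LearningRateScheduler(learning_rate_schedule)
    # integer shift ranges are interpreted as pixels by ImageDataGenerator
    datagen = ImageDataGenerator(
        horizontal_flip=True, width_shift_range=4, height_shift_range=4)
    y_train_onehot = tf.keras.utils.to_categorical(y_train, 10)
    y_test_onehot = tf.keras.utils.to_categorical(y_test, 10)
    model.fit(
        datagen.flow(x_train, y_train_onehot, batch_size=128),
        validation_data=(x_test, y_test_onehot), epochs=150, verbose=1,
        callbacks=[lr_scheduler])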
resnet56 = resnet(56)
train_with_lr_decay(resnet56)
train_with_lr_decay_and_augmentation(resnet56)
###Output
_____no_output_____ |
notebooks/tumblr_crawler.ipynb | ###Markdown
Digest into posts
###Code
posts.delete_many({'source_platform': 'tumblr'})
# from https://stackoverflow.com/questions/51488240/python-get-json-keys-as-full-path
def get_paths(source):
paths = []
if isinstance(source, MutableMapping): # found a dict-like structure...
for k, v in source.items(): # iterate over it; Python 2.x: source.iteritems()
paths.append([k]) # add the current child path
paths += [[k] + x for x in get_paths(v)] # get sub-paths, extend with the current
# else, check if a list-like structure, remove if you don't want list paths included
elif isinstance(source, Sequence) and not isinstance(source, str):
# Python 2.x: use basestring instead of str ^
for i, v in enumerate(source):
paths.append([i])
paths += [[i] + x for x in get_paths(v)] # get sub-paths, extend with the current
return paths
c = Counter([str(p) for s in raw.find() for p in get_paths(s)])
with open('tumblr_attrs.txt', 'w') as f:
f.write(str(c))
class Post ():
_attrs = [
'id',
'post_id',
'datetime',
'url',
'title',
'content',
'thumbnail',
'preview',
'num_comments',
'source',
'source_platform',
'source_url',
'tags',
'media_type',
'labels'
]
def __init__ (self, p):
self._p = p
self.id = p['id']
self.post_id = f'tumblr/{p["blog_name"]}/{p["id"]}'
self.url = p['post_url']
self.title = p['summary']
self.num_comments = p['note_count']
self.source = p['blog']['name']
self.source_platform = 'tumblr'
self.source_url = p['blog']['url']
def digest (self):
return {a:getattr(self, a) for a in Post._attrs}
@property
def datetime (self):
return datetime.fromtimestamp(self._p['timestamp']).isoformat()
@property
def content (self):
if 'caption' in self._p:
return bs(self._p['caption'], 'html.parser').get_text()
else:
return ''
@property
def tags (self):
return self._p['tags']
@property
def labels (self):
return {
'auto': labels_from_tags(self.tags)
}
@property
def media_type (self):
if self.preview['url'] == '':
return self._p['type']
else:
return 'image'
@property
def preview (self):
if 'photos' in self._p:
return {
'url': self._p['photos'][0]['original_size']['url'],
'width': self._p['photos'][0]['original_size']['width'],
'height': self._p['photos'][0]['original_size']['height']
}
elif self._p['type'] == 'video':
print('Video type')
if self._p['video_type'] == "unknown":
print("Unknown video type")
print(self._p['player'][-1]['embed_code'])
else:
print(f'Tumblr post {self._p["id"]} has video: {self._p["permalink_url"]}')
elif self._p['type'] == 'text':
print('Text type')
if 'body' in self._p:
img = bs(self._p['body'], 'html.parser').find('img')
if img:
print(f'Tumblr posts {self._p["id"]} has img in body: {img["data-orig-width"]} {img["data-orig-height"]} {img["src"]}')
print()
return {
'url': img['src'],
'width': img['data-orig-width'],
'height': img['data-orig-height'],
}
print(f'Tumblr posts missing photos: {self._p["id"]}')
print()
return {
'url': '',
'width': 0,
'height': 0
}
@property
def thumbnail (self):
if 'photos' in self._p:
thumbnails = [s for s in self._p['photos'][0]['alt_sizes'] if s['width'] > 140 or s['height'] > 140]
thumbnail = self._p['photos'][0]['alt_sizes'][0] if len(thumbnails) == 0 else thumbnails[-1]
return {
'url': thumbnail['url'],
'width': thumbnail['width'],
'height': thumbnail['height']
}
else:
return {
'url': '',
'width': 0,
'height': 0
}
def digest_all_posts ():
for p in tqdm(raw.find()):
post = Post(p)
posts.replace_one({'post_id': post.post_id}, post.digest(), upsert=True)
return
digest_all_posts()
###Output
_____no_output_____ |
src/Different Designs Comparison/Reduce_circuits_depth.ipynb | ###Markdown
I will reduce the depth of Walid's `ancilla` & `noancilla` circuits, not in terms of gate count but in terms of the parallel implementation
###Code
import sys, os
sys.path.append(os.path.abspath(os.path.join('..', 'Grover')))
from qiskit import *
from grover import *
###Output
_____no_output_____
###Markdown
There are two factors that contribute to the circuit depth: **1- The number of gates:**The depth of a circuit is the number of time steps required, assuming that gates acting on distinct bits can operate simultaneously (that is, the depth is the maximum length of a directed path from the input to the output of the circuit)[[1](http://www.theory.caltech.edu/~preskill/ph219/chap5_13.pdf)]. It's the maximum of the wire depths.*Example:* Let's take the following circuit and extract its depth:
###Code
d1=QuantumCircuit(2,2)
d1.x(1)
d1.h(0)
d1.x(0)
d1.h(1)
d1.h(1)
d1.measure([0,1],[0,1])
print('The depth of that circuit equals:', d1.depth())
d1.draw('mpl')
###Output
The depth of that circuit equals: 4
###Markdown
$\rightarrow$ As can be seen, the first wire has depth 3 (the measurement is counted) and the second has depth 4. The circuit depth is 4, the maximum of the wire depths. **2- Levels of a circuit:** This is another aspect that can increase or decrease the circuit depth, **and it is what causes the issue in my code, which I will discuss below.** A level is defined as a subsequence of commuting gates that can be applied in parallel, whereby all gates of a certain level are executed within the same time unit. The gates of the next level are executed once all gates of the preceding level have been completed. Level compaction helps increase the parallelization of the circuit implementation and, therefore, not only optimizes the runtime of the circuit but also helps decrease decoherence effects by shortening the overall execution time[[2](https://arxiv.org/abs/quant-ph/0604001)]. That increases the robustness and accuracy of the algorithm implementation, so keeping the number of circuit levels to a minimum would be optimal!**Now let me address the main point: I added some barriers to the circuit in my code. They were meant to provide a pleasant visualization of the algorithm output, helping me distinguish the oracle from the diffuser part. But what I didn't pay attention to is the *levels of a circuit* notion, and to explain why the `barrier` matters, I will show how this gate can increase the depth of a circuit. I am going to use two circuits, `d2` & `d3`, where the former has no barriers whereas the latter does:** * d2 circuit (no barriers)
###Code
d2=QuantumCircuit(3)
d2.x(0)
d2.h(0)
d2.h(1)
d2.x(2)
d2.h(2)
print('The depth of the circuit d2 equals:', d2.depth())
d2.draw('mpl')
###Output
The depth of the circuit d2 equals: 2
###Markdown
* d3 circuit (with barriers)
###Code
d3=QuantumCircuit(3)
d3.x(0)
d3.h(0)
d3.barrier()
d3.h(1)
d3.barrier()
d3.x(2)
d3.h(2)
print('The depth of the circuit d3 equals:', d3.depth())
d3.draw('mpl')
###Output
The depth of the circuit d3 equals: 5
###Markdown
$\rightarrow$ **Therefore, the number of levels in the d3 circuit increases to $5$ so that the depth is equal to $5$, as opposed to the d2 circuit with only two levels (depth=$2$). See figure:**This shows that adding a barrier not only serves a nice visualization but also prevents parallelization if needed (and consequently expands the circuit depth). Let me illustrate how the depth decreases by running the code before and after the commit (removing the barriers) for both types of circuits, `ancilla` & `noancilla`: * ***Before committing: `ancilla` circuit*** (with barriers)
###Code
r1, *_ = grover(['10001111'], "ancilla")
print('The circuit depth before a commit for ancilla circuit: ',r1.decompose().depth())
###Output
The circuit depth before a commit for ancilla circuit: 387
###Markdown
* ***After committing: `ancilla` circuit*** (no barriers)
###Code
r2, *_ = grover(['10001111'], "ancilla")
print('The circuit depth after a commit for ancilla circuit: ',r2.decompose().depth())
###Output
The circuit depth after a commit for ancilla circuit: 386
###Markdown
There is a one-unit reduction in circuit depth, which is not a big deal. More interesting is the case of the `noancilla` circuit.* ***Before committing: `noancilla` circuit*** (with barriers)
###Code
w1, *_ = grover(['10001111'], "noancilla")
print('The circuit depth before a commit for noancilla circuit: ',w1.decompose().depth())
###Output
The circuit depth before a commit for noancilla circuit: 123
###Markdown
* ***After committing: `noancilla` circuit*** (no barriers)
###Code
w2, *_ = grover(['10001111'], "noancilla")
print('The circuit depth after a commit for noancilla circuit: ',w2.decompose().depth())
###Output
The circuit depth after a commit for noancilla circuit: 99
|
hackerrank_python.ipynb | ###Markdown
Swap Case
###Code
def swap_case(s):
char_list = []
for char in s:
if char.islower() == True:
char_list.append(char.upper())
elif char.isupper() == True:
char_list.append(char.lower())
else:
char_list.append(char)
return "".join(char_list)
swap_case("Www.HackerRank.com")
def swap_case(s):
return "".join([x.upper() if x.islower() else x.lower() for x in s])
swap_case("Www.HackerRank.com")
###Output
_____no_output_____
###Markdown
Mutations
###Code
def mutate_string(string, position, character):
str_list = []
for index,letter in enumerate(string):
if index != position:
str_list.append(letter)
elif index == position:
str_list.append(character)
return "".join(str_list)
mutate_string("Test",1,"o")
###Output
_____no_output_____
###Markdown
Print Function
###Code
n = 5
str_list = []
for i in range(1,n+1):
str_list.append(str(i))
print("".join(str_list))
###Output
_____no_output_____
###Markdown
Find a string
###Code
def count_substring(string, sub_string):
count = 0
low = 0
high = len(sub_string)
for _ in range(low, len(string)-len(sub_string)+1):
chunk = string[low:high]
if sub_string == chunk:
count += 1
low += 1
high += 1
return count
s = 'ABCDCDC'
sub = 'CDC'
count = count_substring(s,sub)
print(count)
###Output
2
###Markdown
List Comprehensions
###Code
x=1
y=1
z=1
dim=[0]*3
n=2
[x for x in [(str(x),str(y),str(z)) for x in dim]]
# x = int(input())
# y = int(input())
# x = int(input())
# n = int(input())
x = 1
y = 1
z = 1
n = 2
# Iteration approach
for i in range(x+1):
for j in range(y+1):
for k in range(z+1):
if sum([i,j,k]) != n:
print([i,j,k])
# List comprehension approach
[[i,j,k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if sum([i,j,k]) != n]
###Output
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[1, 0, 0]
[1, 1, 1]
###Markdown
Finding the percentage
###Code
from statistics import mean
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
student_marks[name] = scores
query_name = input()
print("{:.2f}".format(round(mean(student_marks[query_name]),2)))
###Output
_____no_output_____
###Markdown
Compress the String!
###Code
s = '1222311'
s_set = set([x for x in s])
s_count = [s.count(x) for x in s_set]
result = map(lambda x, y:(x, y), s_count, s_set)
for pair in result:
print(tuple(pair))
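# Note (added): the set/count approach above counts total occurrences of each
# character rather than consecutive runs. A hedged sketch of the run-length
# version (assuming the goal is to group consecutive characters, as in the
# HackerRank problem) using itertools.groupby:
from itertools import groupby
for digit, group in groupby(s):
    print((len(list(group)), int(digit)))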
###Output
_____no_output_____
###Markdown
Tuples
###Code
n = int(input())
integer_list = map(int, input().split())
print(hash(tuple(integer_list)))
###Output
_____no_output_____
###Markdown
Lists Nested Lists
###Code
# List apporach
name_lst = ["Harry", "Berry", "Tina", "Akriti", "Harsh"]
score_lst = [37.21, 37.21, 37.2, 41, 39]
# score_lst = [4, -50, -50, -50, 51]
# Hackerrank Input Code
# name_lst = []
# score_lst = []
# for _ in range(int(input())):
# name = input()
# score = float(input())
# name_lst.append(name)
# score_lst.append(score)
combo = []
for x, y in zip(score_lst, name_lst):
combo.append([x, y])
combo.sort()
score_set = sorted(set(score_lst))
value = score_set[1]
for i in combo:
if i[0] == value:
print(i[1])
# Dictionary approach, but dictionary needs to be sorted
grades = {}
for _ in range(int(input())):
name = input()
score = float(input())
grades.update({f"student{_}" : {"name" : name, "score" : score}})
values = []
for i in grades:
if grades[i]["score"] not in values:
values.append(grades[i]["score"])
values.sort()
for i in grades:
if grades[i]["score"] == values[1]:
print(grades[i]["name"])
###Output
_____no_output_____
###Markdown
Text Wrap
###Code
# Solution
import textwrap
s = 'ABCDEFGHIJKLIMNOQRSTUVWXYZ'
w = 4
def wrap(string, max_width):
return "\n".join(textwrap.wrap(string, max_width))
wrap(s, w)
import math
s = 'ABCDEFGHIJKLIMNOQRSTUVWXYZ'
w = 4
low = 0
high = math.floor(len(s)/w)
bottom = 0
top = w
for _ in range(low, high):
print(s[bottom:top])
bottom += w
top += w
else:
print(s[-(len(s)%w):])
###Output
ABCD
EFGH
IJKL
IMNO
QRST
UVWX
YZ
###Markdown
Capitalize!
###Code
def solve(s):
lst = s.split(" ")
title_lst = []
for word in lst:
if word == "":
title_lst.append("")
elif word[0].isdigit() == False:
title_lst.append(word.title())
else:
title_lst.append(word)
return " ".join(title_lst)
# Oneliner attempt
def solve(s):
return " ".join(["" if x == "" else x.title() if x[0].isdigit() == False else x for x in s.split(" ")])
# HackerRank top answer
for x in s[:].split():
s = s.replace(x, x.capitalize())
s = '1 w 2 r 3g'
# s = "hello world lol"
solve(s)
###Output
_____no_output_____
###Markdown
Find the Runner-Up Score!
###Code
arr = [2, 3, 6, 6, 5]
arr.sort()
amt = arr.count(max(arr))
for times in range(0, amt):
arr.remove(max(arr))
max(arr)
###Output
_____no_output_____
###Markdown
Loops
###Code
for i in range(0, 5):
print(i**2)
###Output
0
1
4
9
16
###Markdown
String Validators
###Code
s='qA2'
True in [x.isdigit() for x in s]
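# Sketch (added): the full HackerRank task checks five string properties; any()
# expresses the same "True in [...]" idea more idiomatically.
for check in (str.isalnum, str.isalpha, str.isdigit, str.islower, str.isupper):
    print(any(check(ch) for ch in s))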
###Output
_____no_output_____
###Markdown
itertools.product()
###Code
from itertools import product
a = map(int, input().split())
b = map(int, input().split())
[print(x, end = " ") for x in list(product(list(a), list(b)))]
###Output
1 2
3 4
(1, 3) (1, 4) (2, 3) (2, 4)
###Markdown
Introduction to Sets
###Code
def average(array):
my_set = set(array)
return sum(my_set)/len(my_set)
###Output
_____no_output_____ |
notebooks/logistic regression/Madelon-new-methods.ipynb | ###Markdown
$\tau = 10^2$
###Code
model = LogisticRegressionGD(X_train, y_train, 1e2, device=device)
model.fit(1e-8, max_iter=100500)
preds = model.predict(X_test)
torch.mean((preds == y_test).double())
len(model.log)
experiment = Experiment(model.log, model.obj, values=model.value_log, device=device)
k = 6
experiment.run_method("RRE+QR", RRE, k, method_kwargs={"qr": True})
experiment.run_method("MMPE", MMPE, k)
experiment.run_method("TEA", TEA_solve, 2*k, input_size=k)
experiment.run_method("Regularized RRE", regularized_RRE, k, method_kwargs={"lambda_": 1e-15})
experiment.run_method("RNA+norm", RNA, k, method_kwargs={"lambda_range": (1e-15, 1e-2), "linesearch": False})
experiment.run_method("RNA+ls", RNA, k, method_kwargs={"lambda_range": (1e-15, 1e-2), "linesearch": True})
experiment.run_method("Epsilon inv", vector_epsilon_v1, 2*k, method_kwargs={"k": k})
experiment.run_method("Epsilon mult", vector_epsilon_v2, 2*k, method_kwargs={"k": k})
experiment.run_method("Topological epsilon", topological_vector_epsilon, 2*k, method_kwargs={"k": k})
plt.figure(figsize=(14, 8))
experiment.plot_values(n=2000)
plt.ylim(1220, 1300)
plt.legend()
plt.figure(figsize=(14, 8))
experiment.plot_log_diff(n=2000)
plt.legend()
model.theta = experiment.best_x
preds = model.predict(X_test)
torch.mean((preds == y_test).double())
df = experiment.value_df()
df.to_csv(f"results/madelon-new-methods:tau=1e2.csv")
###Output
_____no_output_____
###Markdown
$\tau = 10^{-3}$
###Code
model = LogisticRegressionGD(X_train, y_train, 1e-3, device=device)
model.fit(1e-8, max_iter=100500)
preds = model.predict(X_test)
torch.mean((preds == y_test).double())
len(model.log)
experiment = Experiment(model.log, model.obj, values=model.value_log, device=device)
k = 6
experiment.run_method("RRE+QR", RRE, k, method_kwargs={"qr": True})
experiment.run_method("MMPE", MMPE, k)
experiment.run_method("TEA", TEA_solve, 2*k, input_size=k)
experiment.run_method("Regularized RRE", regularized_RRE, k, method_kwargs={"lambda_": 1e-15})
experiment.run_method("RNA+norm", RNA, k, method_kwargs={"lambda_range": (1e-15, 1e-2), "linesearch": False})
experiment.run_method("RNA+ls", RNA, k, method_kwargs={"lambda_range": (1e-15, 1e-2), "linesearch": True})
experiment.run_method("Epsilon inv", vector_epsilon_v1, 2*k, method_kwargs={"k": k})
experiment.run_method("Epsilon mult", vector_epsilon_v2, 2*k, method_kwargs={"k": k})
experiment.run_method("Topological epsilon", topological_vector_epsilon, 2*k, method_kwargs={"k": k})
plt.figure(figsize=(14, 8))
experiment.plot_values(n=10000)
plt.ylim(1000, 1300)
plt.legend()
plt.figure(figsize=(14, 8))
experiment.plot_log_diff(n=80000)
plt.ylim(-1, 5)
plt.legend()
plt.figure(figsize=(14, 8))
experiment.plot_log_diff(n=80000, methods=["RRE+QR", "RNA+ls", "Epsilon inv", "Epsilon mult", "Regularized RRE"])
plt.ylim(-0, 5)
plt.legend()
model.theta = experiment.best_x
preds = model.predict(X_test)
torch.mean((preds == y_test).double())
df = experiment.value_df()
df.to_csv(f"results/madelon-new-methods:tau=1e-7.csv")
###Output
_____no_output_____ |
book/numpy/code/2_Array_manipulation_routines.ipynb | ###Markdown
Array manipulation routines
###Code
import numpy as np
np.__version__
###Output
_____no_output_____ |
Kaggle_notebooks/hpa-challenge-2021_hpa-competition-tfkeras-basemodel.ipynb | ###Markdown
Build a base model for the Human Protein Atlas - Single Cell Classification Competition using Tensorflow and Keras IntroductionImportant insights from the previous competition, from Ouyang et al. Nature Methods (2019) sections "Strategies used by the top-ranking solutions" and "Assessing the biological relevance of the winning model with class activation maps (CAMs)":1. Data augmentation such as random cropping, rotation, and flipping might improve model performance. 2. Modifications of the loss function. 3. DenseNet architecture more effective than ResNet. 4. Medium sized networks worked better than larger ones (for example DenseNet121 performed better than DenseNet169).5. Using larger image sizes might improve scores.6. Model ensembling and stacking might improve performance. 7. Class activation maps (CAMs) can be used for visualization of model spatial attention.Articles: [1] Ouyang, W., Winsnes, C.F., Hjelmare, M. et al. Analysis of the Human Protein Atlas Image Classification competition. Nat Methods 16, 1254–1261 (2019). https://doi.org/10.1038/s41592-019-0658-6Notebooks:(1) [DenseNet Trained with Old and New Data](https://www.kaggle.com/raimonds1993/aptos19-densenet-trained-with-old-and-new-data) by Federico Raimondi.(2) [Tutorial on Keras ImageDataGenerator with flow_from_dataframe](https://vijayabhaskar96.medium.com/tutorial-on-keras-imagedatagenerator-with-flow-from-dataframe-8bd5776e45c1) by Vijayabhaskar J. Datasets:(1) [HPA cell tiles sample balanced dataset: individual cells as RGB jpg images for rapid experimentation](https://www.kaggle.com/thedrcat/hpa-cell-tiles-sample-balanced-dataset) by Darek Kłeczek, a single-cell image version of the original dataset, below.(2) [Human Protein Atlas - Single Cell Classification Dataset](https://www.kaggle.com/c/hpa-single-cell-image-classification/data).Package documentation:(1) [Keras DenseNet121](https://keras.io/api/applications/densenet).(2) [Tensorflow Module: tf.keras.layers.experimental.preprocessing](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/).(3) [Tensorflow Data augmentation](https://www.tensorflow.org/tutorials/images/data_augmentation).(4) [Tensorflow Image classification](https://www.tensorflow.org/tutorials/images/classification).(5) [Tensorflow Image dataset from directory](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory).(6) [scikit-learn MultiLabelBinarizer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MultiLabelBinarizer.htmlsklearn.preprocessing.MultiLabelBinarizer). Tasks:1. Preprocessing:(1.1) Get unique single-cell image identifiers and multilabels.(1.2) Train and validation split.(1.3) Configure dataset for performance.2. Model definition.3. Training.4. Evaluation.
###Code
# libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import warnings
import os,gc,cv2
import shutil
import random
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
from sklearn.preprocessing import MultiLabelBinarizer
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications import DenseNet121
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import RMSprop
%matplotlib inline
warnings.filterwarnings('ignore')
# directories
CELL_IMG='../input/hpa-cell-tiles-sample-balanced-dataset/cells/'
CELL_DF='../input/hpa-cell-tiles-sample-balanced-dataset/cell_df.csv'
###Output
_____no_output_____
###Markdown
1. Pre-processing (1.1) Get unique single-cell image identifiers and multilabels
###Code
# loads train dataframe
train_df=pd.read_csv(CELL_DF)
train_df.head(n=10)
# splitting the label column
train_df["image_labels"] = train_df["image_labels"].str.split("|")
# class labels
class_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18']
# binarizing each label/class
for label in tqdm(class_labels):
train_df[label] = train_df['image_labels'].map(lambda result: 1 if label in result else 0)
# rename column
train_df.columns = ['image_id', 'r_mean', 'g_mean', 'b_mean', 'cell_id', 'image_labels', 'size1', 'size2', 'Nucleoplasm', 'Nuclear membrane', 'Nucleoli', 'Nucleoli fibrillar center',
'Nuclear speckles', 'Nuclear bodies', 'Endoplasmic reticulum', 'Golgi apparatus', 'Intermediate filaments',
'Actin filaments', 'Microtubules', 'Mitotic spindle', 'Centrosome', 'Plasma membrane', 'Mitochondria',
'Aggresome', 'Cytosol', 'Vesicles and punctate cytosolic patterns', 'Negative']
train_df.head()
# creates a new column with unique identifiers for single-cell images
multinames = ['id', 'r_mean', 'g_mean', 'b_mean', 'image_labels', 'size1', 'size2', 'Nucleoplasm', 'Nuclear membrane', 'Nucleoli', 'Nucleoli fibrillar center',
'Nuclear speckles', 'Nuclear bodies', 'Endoplasmic reticulum', 'Golgi apparatus', 'Intermediate filaments',
'Actin filaments', 'Microtubules', 'Mitotic spindle', 'Centrosome', 'Plasma membrane', 'Mitochondria',
'Aggresome', 'Cytosol', 'Vesicles and punctate cytosolic patterns', 'Negative']
cell_df=train_df
cell_df["id"] = cell_df['image_id'] +'_'+ cell_df['cell_id'].astype(str)
cell_df["id"] = cell_df["id"] + '.jpg'
cell_df=cell_df.drop( columns=['image_id', 'cell_id'] )
cell_df=cell_df.reindex( columns= multinames )
cell_df.head()
# change order of ids as in the cells folder
cell_df=cell_df.sort_values('id', axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last')
cell_df.head()
# define multilabels for training
multilabels = ['Nucleoplasm', 'Nuclear membrane', 'Nucleoli', 'Nucleoli fibrillar center',
'Nuclear speckles', 'Nuclear bodies', 'Endoplasmic reticulum', 'Golgi apparatus', 'Intermediate filaments',
'Actin filaments', 'Microtubules', 'Mitotic spindle', 'Centrosome', 'Plasma membrane', 'Mitochondria',
'Aggresome', 'Cytosol', 'Vesicles and punctate cytosolic patterns', 'Negative']
print( len(multilabels), '\n')
###Output
19
###Markdown
(1.2) Train and validation split Use the tensorflow method 'flow_from_dataframe', as in this [notebook](https://www.kaggle.com/minniekabra/code-3may)
###Code
# constant parameters
IMG_SIZE = 224
BATCH_SIZE = 32
# image generator; rescaling is performed in the model's Rescaling layer below,
# so we do not rescale here (rescaling in both places would scale the images twice)
image_generator = image.ImageDataGenerator(
data_format='channels_last',
preprocessing_function=None,
validation_split=0.2
)
# train set data flow from dataframe
train_data = image_generator.flow_from_dataframe(
cell_df,
directory=CELL_IMG,
x_col='id',
y_col=multilabels,
class_mode='raw',
color_mode='rgb',
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=BATCH_SIZE,
seed=123,
subset='training'
)
# validation set data flow from dataframe
validation_data = image_generator.flow_from_dataframe(
cell_df,
directory=CELL_IMG,
x_col='id',
y_col=multilabels,
class_mode='raw',
color_mode='rgb',
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=BATCH_SIZE,
seed=123,
subset='validation'
)
###Output
Found 34807 validated image filenames.
###Markdown
2. Model definition
###Code
# constant parameters for model definition
NUM_CLASSES=19
# DenseNet121 model
densenet = DenseNet121(
include_top=True,
weights=None,
input_shape=(IMG_SIZE,IMG_SIZE,3),
input_tensor=None,
pooling=None,
classes=NUM_CLASSES
)
# model definition including a normalization layer and extra layers
model_densenet = Sequential( [
layers.experimental.preprocessing.Rescaling( 1./255, input_shape=(IMG_SIZE, IMG_SIZE, 3) ),
layers.experimental.preprocessing.RandomFlip("horizontal"),
layers.experimental.preprocessing.RandomFlip("vertical"),
layers.experimental.preprocessing.RandomTranslation(height_factor=0.1, width_factor=0.1),
layers.experimental.preprocessing.RandomRotation(factor=1.0),
layers.experimental.preprocessing.RandomZoom(height_factor=0.25, width_factor=0.25),
densenet
] )
# shape of the output ndarray
model_densenet.output
# compile model
learning_rate = 1e-3
model_densenet.compile(optimizer=Adam(lr=learning_rate),
loss='binary_crossentropy', metrics=['categorical_accuracy'])
# model summary
model_densenet.summary()
###Output
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
rescaling (Rescaling) (None, 224, 224, 3) 0
_________________________________________________________________
random_flip_4 (RandomFlip) (None, 224, 224, 3) 0
_________________________________________________________________
random_flip_5 (RandomFlip) (None, 224, 224, 3) 0
_________________________________________________________________
random_translation_2 (Random (None, 224, 224, 3) 0
_________________________________________________________________
random_rotation_2 (RandomRot (None, 224, 224, 3) 0
_________________________________________________________________
random_zoom_2 (RandomZoom) (None, 224, 224, 3) 0
_________________________________________________________________
densenet121 (Functional) (None, 19) 7056979
=================================================================
Total params: 7,056,979
Trainable params: 6,973,331
Non-trainable params: 83,648
_________________________________________________________________
###Markdown
3. Training
###Code
# constant training parameters
EPOCHS=10
# callbacks
model_callbacks = [
tf.keras.callbacks.EarlyStopping(monitor='loss', patience=2, verbose=0),
tf.keras.callbacks.ModelCheckpoint(filepath='./densenet_model.{epoch:02d}-{val_loss:.2f}.h5'),
tf.keras.callbacks.TensorBoard(log_dir='./logs'),
]
history = model_densenet.fit(
train_data,
validation_data=validation_data,
epochs=EPOCHS,
callbacks=model_callbacks
)
# plot model accuracy
plt.plot(history.history['categorical_accuracy'])
plt.plot(history.history['val_categorical_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# plot model loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
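# Sketch (added): quick qualitative check of the trained model on one validation
# batch. The 0.5 decision threshold is an assumption for the multilabel output.
batch_images, batch_labels = next(validation_data)
pred_probs = model_densenet.predict(batch_images)
pred_labels = (pred_probs > 0.5).astype(int)
print(pred_labels[:5])
print(batch_labels[:5])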
###Output
_____no_output_____ |
Python Text Basics/00-Working-with-Text-Files.ipynb | ###Markdown
___ ___ Working with Text FilesIn this section we'll cover * Working with f-strings (formatted string literals) to format printed text * Working with Files - opening, reading, writing and appending text files Formatted String Literals (f-strings) Introduced in Python 3.6, f-strings offer several benefits over the older `.format()` string method. For one, you can bring outside variables immediately into the string rather than pass them through as keyword arguments:
###Code
name = 'Fred'
# Using the old .format() method:
print('His name is {var}.'.format(var=name))
# Using f-strings:
print(f'His name is {name}.')
###Output
His name is Fred.
His name is Fred.
###Markdown
Pass `!r` to get the string representation:
###Code
print(f'His name is {name!r}')
###Output
His name is 'Fred'
###Markdown
Be careful not to let quotation marks in the replacement fields conflict with the quoting used in the outer string:
###Code
d = {'a':123,'b':456}
print(f'Address: {d['a']} Main Street')
###Output
_____no_output_____
###Markdown
Instead, use different styles of quotation marks:
###Code
d = {'a':123,'b':456}
print(f"Address: {d['a']} Main Street")
###Output
Address: 123 Main Street
###Markdown
Minimum Widths, Alignment and PaddingYou can pass arguments inside a nested set of curly braces to set a minimum width for the field, the alignment and even padding characters.
###Code
library = [('Author', 'Topic', 'Pages'), ('Twain', 'Rafting', 601), ('Feynman', 'Physics', 95), ('Hamilton', 'Mythology', 144)]
for book in library:
print(f'{book[0]:{10}} {book[1]:{8}} {book[2]:{7}}')
###Output
Author Topic Pages
Twain Rafting 601
Feynman Physics 95
Hamilton Mythology 144
###Markdown
Here the first three lines align, except `Pages` follows a default left-alignment while numbers are right-aligned. Also, the fourth line's page number is pushed to the right as `Mythology` exceeds the minimum field width of `8`. When setting minimum field widths make sure to take the longest item into account.To set the alignment, use the character `<` for left-align, `^` for center, or `>` for right.To set padding, precede the alignment character with the padding character (`-` and `.` are common choices).Let's make some adjustments:
###Code
for book in library:
print(f'{book[0]:{10}} {book[1]:{10}} {book[2]:.>{7}}') # here .> was added
###Output
Author Topic ..Pages
Twain Rafting ....601
Feynman Physics .....95
Hamilton Mythology ....144
###Markdown
Date Formatting
###Code
from datetime import datetime
today = datetime(year=2018, month=1, day=27)
print(f'{today:%B %d, %Y}')
###Output
January 27, 2018
###Markdown
For more info on formatted string literals visit https://docs.python.org/3/reference/lexical_analysis.htmlf-strings*** FilesPython uses file objects to interact with external files on your computer. These file objects can be any sort of file you have on your computer, whether it be an audio file, a text file, emails, Excel documents, etc. Note: You will probably need to install certain libraries or modules to interact with those various file types, but they are easily available. (We will cover downloading modules later on in the course).Python has a built-in open function that allows us to open and play with basic file types. First we will need a file though. We're going to use some IPython magic to create a text file! Creating a File with IPython This function is specific to jupyter notebooks! Alternatively, quickly create a simple .txt file with Sublime text editor.
###Code
%%writefile test.txt
Hello, this is a quick test file.
This is the second line of the file.
###Output
Overwriting test.txt
###Markdown
Python Opening a File Know Your File's LocationIt's easy to get an error on this step:
###Code
myfile = open('whoops.txt')
###Output
_____no_output_____
###Markdown
To avoid this error, make sure your .txt file is saved in the same location as your notebook. To check your notebook location, use **pwd**:
###Code
pwd
###Output
_____no_output_____
###Markdown
**Alternatively, to grab files from any location on your computer, simply pass in the entire file path. **For Windows you need to use double \ so python doesn't treat the second \ as an escape character, a file path is in the form: myfile = open("C:\\Users\\YourUserName\\Home\\Folder\\myfile.txt")For MacOS and Linux you use slashes in the opposite direction: myfile = open("/Users/YourUserName/Folder/myfile.txt")
###Code
# Open the text.txt file we created earlier
my_file = open('test.txt')
my_file
###Output
_____no_output_____
###Markdown
`my_file` is now an open file object held in memory. We'll perform some reading and writing exercises, and then we have to close the file to free up memory. .read() and .seek()
###Code
# We can now read the file
my_file.read()
# But what happens if we try to read it again?
my_file.read()
###Output
_____no_output_____
###Markdown
This happens because you can imagine the reading "cursor" is at the end of the file after having read it. So there is nothing left to read. We can reset the "cursor" like this:
###Code
# Seek to the start of file (index 0)
my_file.seek(0)
# Now read again
my_file.read()
###Output
_____no_output_____
###Markdown
.readlines()You can read a file line by line using the readlines method. Use caution with large files, since everything will be held in memory. We will learn how to iterate over large files later in the course.
###Code
# Readlines returns a list of the lines in the file
my_file.seek(0)
my_file.readlines()
###Output
_____no_output_____
###Markdown
When you have finished using a file, it is always good practice to close it.
###Code
my_file.close()
###Output
_____no_output_____
###Markdown
Writing to a FileBy default, the `open()` function will only allow us to read the file. We need to pass the argument `'w'` to write over the file. For example:
###Code
# Add a second argument to the function, 'w' which stands for write.
# Passing 'w+' lets us read and write to the file
my_file = open('test.txt','w+')
###Output
_____no_output_____
###Markdown
**Use caution!**Opening a file with 'w' or 'w+' *truncates the original*, meaning that anything that was in the original file **is deleted**!
###Code
# Write to the file
my_file.write('This is a new first line')
# Read the file
my_file.seek(0)
my_file.read()
my_file.close() # always do this when you're done with a file
###Output
_____no_output_____
###Markdown
Appending to a FilePassing the argument `'a'` opens the file and puts the pointer at the end, so anything written is appended. Like `'w+'`, `'a+'` lets us read and write to a file. If the file does not exist, one will be created.
###Code
my_file = open('test.txt','a+')
my_file.write('\nThis line is being appended to test.txt')
my_file.write('\nAnd another line here.')
my_file.seek(0)
print(my_file.read())
my_file.close()
###Output
_____no_output_____
###Markdown
Appending with `%%writefile`Jupyter notebook users can do the same thing using IPython cell magic:
###Code
%%writefile -a test.txt
This is more text being appended to test.txt
And another line here.
###Output
Appending to test.txt
###Markdown
Add a blank space if you want the first line to begin on its own line, as Jupyter won't recognize escape sequences like `\n` Aliases and Context ManagersYou can assign temporary variable names as aliases, and manage the opening and closing of files automatically using a context manager:
###Code
with open('test.txt','r') as txt:
first_line = txt.readlines()[0]
print(first_line)
###Output
This is a new first line
###Markdown
Note that the `with ... as ...:` context manager automatically closed `test.txt` after assigning the first line of text to first_line:
###Code
txt.read()
###Output
_____no_output_____
###Markdown
Iterating through a File
###Code
with open('test.txt','r') as txt:
for line in txt:
print(line, end='') # the end='' argument removes extra linebreaks
###Output
This is a new first line
This line is being appended to test.txt
And another line here.
This is more text being appended to test.txt
And another line here. |
Project_SentitentAnalysis_food_reviews/VALLABHANENI_PROJECT.ipynb | ###Markdown
Big Data Project Improving Restaurant performance by performing Sentiment classification on online food reviews AbstractOnline food reviews are an important asset for users as well as for the restaurants serving the food. Most of the online reviews are in a free text format, which is difficult for computer systems to analyze. In this project we will take the raw data from different social media websites, clean it to get data we can work with, and perform sentiment classification on the data using some famous classification algorithms to improve the quality of food from restaurants that have bad reviews and increase the overall performance of the restaurant. This project can further be expanded to predict what kind of food from a restaurant is likely to be ordered and whether it is healthy to consume. Related domain of studyThis project aims to improve the business performance of restaurants and fast foods by detecting what kind of food item on their menu brings their ratings down. This can be achieved by using the Naïve Bayes algorithm. Data sourcesI will be using data from yelp.com- • https://www.yelp.com/dataset/download- • https://www.kaggle.com/yelp-dataset/yelp-datasetyelp_academic_dataset_review.json Gathering dataI wanted to work with the csv version of the files provided by yelp, and found them on Kaggle.Luckily, I found one at https://www.kaggle.com/z5025122/yelp-csv.I downloaded the reviews file, which contains the business_id, review, ratings and other columns, along with the business file, which has the corresponding restaurant names and addresses for the business_id values in the reviews file.The review file was too huge to process, so I decided to use only part of the data instead of the whole file and worked on the first 15,000 rows.
###Code
import pandas as pd
raw_review = pd.read_csv(".\data\yelp_review.csv", nrows=15000)
raw_review.head()
raw_review.to_csv(r'.\data\sampletextfile.csv')
###Output
_____no_output_____
###Markdown
Reducing the data by removing columns which are not needed.
###Code
#loading the data
reviews= pd.read_csv(".\data\sampletextfile.csv")
business = pd.read_csv(".\data\yelp_business.csv")
###Output
_____no_output_____
###Markdown
preview the data
###Code
reviews.head()
business.head()
###Output
_____no_output_____
###Markdown
Merge the two files (reviews dataframe and business dataframe) for easy use.
###Code
result = pd.merge(reviews,business, how='left', on=['business_id'])
result.head()
###Output
_____no_output_____
###Markdown
Testing the result dataframe
###Code
# return review of a restaurant whose business id is AE..A
result['text'].loc[result['business_id'] == 'AEx2SYEUJmTxVVB18LlCwA' ]
###Output
_____no_output_____
###Markdown
To keep this interesting, let's do some visualization of the existing data.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#we can create a new column in yelp called text length. This column will store the number of characters in each review.
result['text length'] = result['text'].apply(len)
# The graph plots text length vs stars
g = sns.FacetGrid(data=result, col='stars_x')
g.map(plt.hist, 'text length', bins=50)
###Output
_____no_output_____
###Markdown
According to the graph above, the longer the review, the higher the star rating tends to be.
###Code
#This can also be shown on a boxplot
sns.boxplot(x='stars_x', y='text length', data=result)
###Output
_____no_output_____
###Markdown
Top 20 restaurants in the data set sorted on the basis of sum and mean of star Ratings.
###Code
info = pd.pivot_table(result,index=['name'],values=['stars_x'],
columns=[],aggfunc=[np.sum, np.mean],fill_value=0)
info.head(20)
#https://www.kaggle.com/ritzdevp/sentiment-analysis-using-nltk
###Output
_____no_output_____
###Markdown
Describe the data
###Code
result.describe()
###Output
_____no_output_____
###Markdown
Sentiment AnalysisI used VADER to perform sentiment Analysis."VADER (Valence Aware Dictionary for sEntiment Reasoning) is a model used for text sentiment analysis that is sensitive to both polarity (positive/negative) and intensity (strength) of emotion."Further details about VADER can be found at http://datameetsmedia.com/vader-sentiment-analysis-explained/
###Code
all_reviews = result['text']
all_sent_values = []
all_sentiments = []
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def sentiment_value(paragraph):
analyser = SentimentIntensityAnalyzer()
res = analyser.polarity_scores(paragraph)
score = res['compound']
return round(score,1)
import nltk
nltk.download('vader_lexicon')
#testing VADER on the review at index 101
sample = result['text'][101]
print(sample)
print('Sentiment: ')
print(sentiment_value(sample))
# to run VADER on the entire file (load all the 15000 rows)
temp_data = result[0:15000]
temp_data.shape
import timeit
start_time = timeit.default_timer() #to check time of execution
for i in range(0,15000):
all_sent_values.append(sentiment_value(all_reviews[i])) # will take a while
elapsed = timeit.default_timer() - start_time
elapsed #check time of evaluation
SENTIMENT_VALUE = []
SENTIMENT= []
for i in range(0,15000):
sent = all_sent_values[i]
if (sent<=1 and sent>=0.5):
SENTIMENT.append('V.Positive') # give a sentiment of v.positive to star ratings ranging from 0.5 to 1
SENTIMENT_VALUE.append(5)
elif (sent<0.5 and sent>0):
SENTIMENT.append('Positive') # give a sentiment of positive to star ratings ranging from 0.5 to 0
SENTIMENT_VALUE.append(4)
elif (sent==0):
SENTIMENT.append('Neutral') # give a sentiment of v.positive to star ratings of 0
SENTIMENT_VALUE.append(3)
elif (sent<0 and sent>=-0.5):
SENTIMENT.append('Negative') # give a sentiment of negative to star ratings ranging from -0.5 to 0
SENTIMENT_VALUE.append(2)
else:
SENTIMENT.append('V.Negative') # give a sentiment of v.negative to star ratings ranging from -0.5 t0 -1
SENTIMENT_VALUE.append(1)
#Append SENTIMENT and SENTIMENT_VALUE to the temp_data
temp_data['SENTIMENT_VALUE'] = SENTIMENT_VALUE
temp_data['SENTIMENT'] = SENTIMENT
#view dataframe
temp_data[['business_id','name','SENTIMENT_VALUE']].head(20)
#find accuracy between the user given star ratings and VADER generated sentiment value
counter = 0
for i in range(0,15000):
if (abs(temp_data['stars_x'][i]-temp_data['SENTIMENT_VALUE'][i])>1):
counter += 1
from __future__ import division
accuracy = (temp_data.shape[0]-counter) / temp_data.shape[0]
percent_accuracy = accuracy*100
percent_accuracy
###Output
_____no_output_____
###Markdown
The above analysis shows a 74.7% match between the user-given star ratings and the VADER-generated sentiment values. Top 10 restaurants in the data set, sorted on the basis of sum and mean of star ratings and sentiment value.
###Code
testingtop10 = pd.pivot_table(temp_data,index=['name'],values=['stars_x','SENTIMENT_VALUE'],
columns=[],aggfunc=[np.sum, np.mean],fill_value=0)
#testing3 = testing3.sort_values(by=('sum'), ascending = False)
testingtop10.head(10)
###Output
_____no_output_____
###Markdown
Test the dataframe to get relevant reviews for a relevant business
###Code
#get review of a business where business_id is CKC0-MOWMqoeWf6s-szl8g
temp_data['text'].loc[temp_data['business_id'] == 'CKC0-MOWMqoeWf6s-szl8g' ]
#get sentiment value and rating of a particular business id.
temp_data[['text','SENTIMENT_VALUE','stars_x']].loc[temp_data['business_id'] == "8QWPlVQ6D-OExqXoaD2Z1g" ]
###Output
_____no_output_____
###Markdown
Further analysisWe can't read each and every review of a particular business if the review list is huge. We can add named entity recognition to process the data further and give the restaurants the key words in the reviews along with the sentiment and sentiment value.I used spaCy to perform named entity recognition.https://spacy.io/usage/spacy-101
###Code
import spacy
import en_core_web_sm
nlp = en_core_web_sm.load()
#testing spacy
from __future__ import unicode_literals
doc = nlp("Next week I'll be in Madrid.")
for ent in doc.ents:
print(ent.text, ent.label_)
###Output
(u'Next week', u'DATE')
(u'Madrid', u'GPE')
###Markdown
To perform named entity recognition on the reviews, we need to know what items are on the menu and what the common food items are. I found data at https://github.com/vabraham/foodie_favorites/tree/master/data which has the restaurant menu items in the form of pickles
###Code
import pandas as pd
import csv
from six.moves import cPickle as pickle
import numpy as np
main_food_df = pd.read_pickle(".\data\word_list.pkl") #load common food
menu_list_prep = pd.read_pickle(".\data\menu_list.pkl") #load menu list
menu_prep_two =[name[:-1] if name.endswith('\r') else name for name in menu_list_prep] #clean the unwanted data
import unicodedata
menu_prep_three_df = [x.encode('UTF8') for x in menu_prep_two]
from __future__ import unicode_literals
menu_d_df=[]
for item in menu_prep_three_df:
if len(item.split()) < 3:
menu_d_df.append(item)
import spacy
import en_core_web_sm
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
nlp = en_core_web_sm.load()
patterns = [nlp(item.decode('utf8')) for item in main_food_df] # process each word to create phrase pattern
matcher = PhraseMatcher(nlp.vocab)
matcher.add('MAINFOOD', None, *patterns) # add patterns to matcher
patternstwo = [nlp(itemtwo.decode('utf8')) for itemtwo in menu_d_df] # process each word to create phrase pattern
matcher = PhraseMatcher(nlp.vocab)
matcher.add('INMENU', None, *patternstwo) # add patterns to matcher
doc = nlp("I love the Asparagus Prawn from this place")
matches = matcher(doc)
for match_id, start, end in matches:
# create a new Span for each match and use the match_id as the label
span = Span(doc, start, end, label=match_id)
doc.ents = list(doc.ents) + [span] # add span to doc.ents
print([(ent.text, ent.label_) for ent in doc.ents])
###Output
[(u'the', u'ORG'), (u'Asparagus', u'INMENU'), (u'Prawn', u'INMENU')]
|
Python/Beginner/week-02-unit-01-practice.ipynb | ###Markdown
Practice Session 1--- **Use functions to get library details**Such as- get_all_books- get_books_by_category- get_books_by_tags- etc. Practice Session 2--- **1. Temperature Converter** Tc=(5/9)*(Tf-32) Tf=(9/5)*Tc+32Problem: - Store Fahrenheit in a variable and calculate Celsius from the formula above. - Store Celsius in a variable and calculate Fahrenheit from the formula above. **2. Leap year checker** - leap year divisible by four - but not by 100 unless it is divisible by 400 - example 1992, 1996 and 2000 are leap years - example 1993 and 1900 are not Problem: - create a list with multiple years - loop through that list and check if each is a leap year or not **3. Reverse every second string** string = "The quick brown fox jumps over a lazy dog."Problem: - reverse every second word in the above string, e.g. quick gets reversed while brown does not. Hint: - use string methods split and join
###Code
string = "The quick brown fox jumps over a lazy dog."
str_list = string.split()
str_list
for i in range(1, len(str_list), 2):
print(str_list[i])
' '.join(str_list)
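# Hedged sketch (added): one possible completion of problem 3, plus quick takes
# on problems 1 and 2 from the practice list above.
reversed_every_second = [w[::-1] if i % 2 == 1 else w for i, w in enumerate(str_list)]
print(' '.join(reversed_every_second))

# 1. Temperature converter
tf_value = 98.6                      # sample Fahrenheit value (assumption)
tc_value = (5 / 9) * (tf_value - 32)
print(tc_value, (9 / 5) * tc_value + 32)

# 2. Leap year checker
for year in [1992, 1993, 1900, 2000]:
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    print(year, is_leap)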
###Output
_____no_output_____ |
jupyter-notebooks/privacy_policy_predictor.ipynb | ###Markdown
###Code
import random
good_privacy = []
with open("good_privacy.txt", "r") as f:
for line in f:
good_privacy.append(line.rstrip())
bad_privacy_raw = []
with open("bad_privacy.txt", "r") as f:
for line in f:
bad_privacy_raw.append(line.rstrip())
bad_privacy = bad_privacy_raw[: -6 or None]
from sklearn.model_selection import train_test_split
# splitting the model into X and Y training and test data
# X is good, Y is bad
# we will represent X with 0 and Y with 1
X_train, X_test, y_train, y_test = train_test_split(good_privacy, bad_privacy, test_size=0.2, random_state=42)
combined_train = []
for item in X_train:
combined_train.append([item, 0])
for item in y_train:
combined_train.append([item, 1])
random.shuffle(combined_train)
training_data = []
training_target = []
for item in combined_train:
training_data.append(item[0])
training_target.append(item[1])
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
text_clf = text_clf.fit(training_data, training_target)
combined_test = []
for item in X_test:
combined_test.append([item, 0])
for item in y_test:
combined_test.append([item, 1])
random.shuffle(combined_test)
test_data = []
test_target = []
for item in combined_test:
test_data.append(item[0])
test_target.append(item[1])
import numpy as np
predicted = text_clf.predict(test_data)
np.mean(predicted == test_target)
for idx, item in enumerate(test_data):
print(test_data[idx], 'predicted:', predicted[idx], 'actual:', test_target[idx])
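# Usage sketch (added): classify a new, unseen sentence with the fitted pipeline.
# Per the encoding above, 0 = "good" privacy language and 1 = "bad"; the sample
# sentence is hypothetical.
new_sentence = ["We may share your personal information with third-party advertisers."]
print(text_clf.predict(new_sentence))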
from sklearn.linear_model import SGDClassifier
text_clf_svm = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf-svm', SGDClassifier(loss='hinge', penalty='l2',
alpha=1e-3, random_state=42))
])
text_clf_svm = text_clf_svm.fit(training_data, training_target)
predicted_svm = text_clf_svm.predict(test_data)
np.mean(predicted_svm == test_target)
from sklearn.model_selection import GridSearchCV
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (1e-2, 1e-3),
}
gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)
gs_clf = gs_clf.fit(training_data, training_target)
gs_clf.best_score_
gs_clf.best_params_
predicted_gs_clf = gs_clf.predict(test_data)
np.mean(predicted_gs_clf == test_target)
parameters_svm = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf-svm__alpha': (1e-2, 1e-3),
}
gs_clf_svm = GridSearchCV(text_clf_svm, parameters_svm, n_jobs=-1)
gs_clf_svm = gs_clf_svm.fit(training_data, training_target)
gs_clf_svm.best_score_
gs_clf_svm.best_params_
predicted_gs_clf_svm = gs_clf_svm.predict(test_data)
np.mean(predicted_gs_clf_svm == test_target)
###Output
_____no_output_____ |
notebooks/07_exif_tag_extraction.ipynb | ###Markdown
Image Processing
###Code
# imports
import os
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
from datetime import datetime
import pytz
from arcgis.geometry import Geometry
def _convert_to_decimal_degress(value):
"""
Helper function to convert the GPS coordinates stored in the EXIF to degress in float format
:param value: tuple read from EXIF with DMS coordinate values
:return: float
"""
# get the respective degrees, minutes and seconds
d = float(value[0][0]) / float(value[0][1])
m = float(value[1][0]) / float(value[1][1])
s = float(value[2][0]) / float(value[2][1])
# combine degrees, minutes and seconds into decimal degrees
return d + (m / 60.0) + (s / 3600.0)
class Img():
"""
Make it easier to access image properties.
"""
def __init__(self, image_path):
self.file = os.path.abspath(image_path)
self.exif = self._get_exif(image_path)
def _get_exif(self, img):
"""
Extract the image EXIF data to a dictionary.
:param img: String path to the image with EXIF data to be parsed.
:return dict: All tags in very raw format extracted from the image.
"""
try:
# get the exif dictionary
exif_dict = {TAGS.get(tag, tag): value for tag, value in Image.open(img)._getexif().items()}
# clean up the GPS tags to be human readable as well
if 'GPSInfo' in exif_dict.keys():
exif_dict['GPSInfo'] = {GPSTAGS.get(key,key): exif_dict['GPSInfo'][key] for key in exif_dict['GPSInfo'].keys()}
return exif_dict
except Exception as e:
print(f'ERROR on {img}')
@property
def geometry(self):
"""
Get a point geometry from the GPS dictionary extracted from the image EXIF data.
:return Point Geometry: Location where the image was captured.
"""
if self.has_location:
gps_dict = self.exif['GPSInfo']
# extract the longitude and latitude values as decimal degrees
coord_lat = _convert_to_decimal_degress(gps_dict['GPSLatitude'])
coord_lon = _convert_to_decimal_degress(gps_dict['GPSLongitude'])
# assign the correct positive or negative value based on hemisphere
            coord_lon = -coord_lon if gps_dict['GPSLongitudeRef'] == 'W' else coord_lon
            coord_lat = -coord_lat if gps_dict['GPSLatitudeRef'] == 'S' else coord_lat
# create a geometry object from the coordinates
return Geometry({'x': coord_lon, 'y': coord_lat, 'spatialReference': {'wkid': 4326}})
else:
return None
@property
def point(self):
"""
Get a point geometry from the GPS dictionary extracted from the image EXIF data.
:return Point Geometry: Location where the image was captured.
"""
return self.geometry
@property
def location(self):
"""
Get a point geometry from the GPS dictionary extracted from the image EXIF data.
:return Point Geometry: Location where the image was captured.
"""
return self.geometry
@property
def gps_datetime(self):
"""
Get the datetime from the GPS information in the EXIF data.
:param gps_dict: GPS dictionary extracted from the EXIF dictionary.
:return datetime: Datetime object when image was captured according to the GPS timestamp.
"""
if self.has_location:
gps_dict = self.exif['GPSInfo']
# extract the hour, minute and second from the GPS information
gps_time = gps_dict['GPSTimeStamp']
h = int(gps_time[0][0] / gps_time[0][1])
m = int(gps_time[1][0] / gps_time[1][1])
s = int(gps_time[2][0] / gps_time[2][1])
# extract the year, month and day from the GPS information
gps_date = [int(val) for val in gps_dict['GPSDateStamp'].split(':')]
# create a datetime object with the extracted values
return datetime(gps_date[0], gps_date[1], gps_date[2], h, m, s, tzinfo=pytz.utc)
else:
return None
@property
def has_location(self):
if 'GPSInfo' in self.exif.keys():
return True
else:
return False
@property
def properites(self):
return {
'file': self.file,
'exif': self.exif,
'geometry': self.geometry,
'gps_datetime': self.gps_datetime
}
# minimal module imports
from arcgis.features import GeoAccessor, GeoSeriesAccessor
import pandas as pd
import imghdr
import os
from arcgis.gis import GIS
# a couple of handy variables and settings to get started
data = r'../data'
data_raw = os.path.join(data, 'raw')
data_raw_image_dir = os.path.join(data_raw, 'images')
# get all the images to be processed
img_file_lst = [
os.path.abspath(os.path.join(data_raw_image_dir, img))
for img in os.listdir(data_raw_image_dir)
]
img_file_lst = [Img(img) for img in img_file_lst if imghdr.what(img)]
img = img_file_lst[0]
img
print(f'unlocated: {len([img.file for img in img_file_lst if not img.has_location])}')
print(f'located: {len([img.file for img in img_file_lst if img.has_location])}')
df = pd.DataFrame(
[[img.file, img.gps_datetime, img.geometry] for img in img_file_lst],
columns=['file', 'datetime', 'SHAPE']
)
df.spatial.set_geometry('SHAPE')
df.head()
mp = GIS().map('Olympia, WA')
mp.basemap = 'dark-gray-vector'
df.spatial.plot(map_widget=mp)
mp.tab_mode = "split-right"
mp
###Output
_____no_output_____ |
06_reproducibility/model_versioning.ipynb | ###Markdown
Model Versioning Design PatternIn the Model Versioning design pattern, backward compatibility is achieved by deploying a changed model as a microservice with a different REST endpoint. This is a necessary prerequisite for many of the other patterns discussed in this chapter.
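Before running the notebook code, here is a minimal, hedged sketch of what this pattern looks like from the client side: each deployed version is reachable under its own REST path, so existing callers can stay pinned to `v1` while `v2` rolls out. The endpoint shape, project/model/version names, and token handling below are illustrative assumptions, not part of the original notebook.
```python
import requests

def predict_with_version(project, model, version, instances, token):
    # Assumed AI Platform online-prediction URL shape; adjust to your deployment.
    url = (f"https://ml.googleapis.com/v1/projects/{project}"
           f"/models/{model}/versions/{version}:predict")
    resp = requests.post(
        url,
        headers={"Authorization": f"Bearer {token}"},
        json={"instances": instances},  # same request body for every version
    )
    return resp.json()

# e.g. preds_v1 = predict_with_version(PROJECT, 'flight_delay_prediction', 'v1', rows, token)
```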
###Code
import json
import numpy as np
import pandas as pd
import xgboost as xgb
import tensorflow as tf
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from google.cloud import bigquery
###Output
_____no_output_____
###Markdown
Download and preprocess dataYou'll need to authenticate to your Google Cloud to run the BigQuery query below.
###Code
from google.colab import auth
auth.authenticate_user()
###Output
_____no_output_____
###Markdown
In the following cell, replace `your-cloud-project` with the name of your GCP project.
###Code
%%bigquery df --project your-cloud-project
-- Note: this query may take a few minutes to run
SELECT
arr_delay,
carrier,
origin,
dest,
dep_delay,
taxi_out,
distance
FROM
`cloud-training-demos.flights.tzcorr`
WHERE
extract(year from fl_date) = 2015
ORDER BY fl_date ASC
LIMIT 300000
df = df.dropna()
df = shuffle(df, random_state=2)
df.head()
# Only include origins and destinations that occur frequently in the dataset
df = df[df['origin'].map(df['origin'].value_counts()) > 500]
df = df[df['dest'].map(df['dest'].value_counts()) > 500]
df = pd.get_dummies(df, columns=['carrier', 'origin', 'dest'])
###Output
_____no_output_____
###Markdown
Model version 1: predict whether or not the flight is > 30 min delayed
###Code
# Create a boolean column to indicate whether flight was > 30 mins delayed
df.loc[df['arr_delay'] >= 30, 'arr_delay_bool'] = 1
df.loc[df['arr_delay'] < 30, 'arr_delay_bool'] = 0
df['arr_delay_bool'].value_counts()
classify_model_labels = df['arr_delay_bool']
classify_model_data = df.drop(columns=['arr_delay', 'arr_delay_bool'])
x,y = classify_model_data,classify_model_labels
x_train,x_test,y_train,y_test = train_test_split(x,y)
model = xgb.XGBRegressor(
objective='reg:logistic'
)
# Given the dataset size, this may take 1-2 minutes to run
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
acc = accuracy_score(y_test, np.round(y_pred))
print(acc)
# Save the model
model.save_model('model.bst')
###Output
_____no_output_____
###Markdown
Deploying classification model to AI PlatformReplace `your-cloud-project` below with the name of your cloud project.
###Code
# Set your cloud project
PROJECT = 'your-cloud-project'
!gcloud config set project $PROJECT
BUCKET = PROJECT + '_flight_model_bucket'
# Create a bucket if you don't have one
# You only need to run this once
!gsutil mb gs://$BUCKET
!gsutil cp 'model.bst' gs://$BUCKET
# Create the model resource
!gcloud ai-platform models create flight_delay_prediction
# Create the version
!gcloud ai-platform versions create 'v1' \
--model 'flight_delay_prediction' \
--origin gs://$BUCKET \
--runtime-version=1.15 \
--framework 'XGBOOST' \
--python-version=3.7
# Get a prediction on the first example from our test set
!rm -f input.json
num_examples = 10
with open('input.json', 'a') as f:
for i in range(num_examples):
f.write(str(x_test.iloc[i].values.tolist()))
f.write('\n')
!cat input.json
# Make a prediction to the deployed model
!gcloud ai-platform predict --model 'flight_delay_prediction' --version \
'v1' --json-instances 'input.json'
# Compare this with actual values
print(y_test.iloc[:5])
###Output
140323 0.0
66094 0.0
63096 0.0
192118 0.0
222633 0.0
Name: arr_delay_bool, dtype: float64
###Markdown
Model version 2: replace XGBoost with TensorFlow
###Code
tf_model = tf.keras.Sequential([
tf.keras.layers.Dense(32, activation='relu', input_shape=[len(x_train.iloc[0])]),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
tf_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
tf_model.fit(x_train, y_train, epochs=10, validation_split=0.1)
###Output
Epoch 1/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.2069 - accuracy: 0.9468 - val_loss: 0.1038 - val_accuracy: 0.9635
Epoch 2/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.1114 - accuracy: 0.9609 - val_loss: 0.1003 - val_accuracy: 0.9654
Epoch 3/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.1042 - accuracy: 0.9630 - val_loss: 0.1198 - val_accuracy: 0.9578
Epoch 4/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.1006 - accuracy: 0.9640 - val_loss: 0.1009 - val_accuracy: 0.9638
Epoch 5/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.0993 - accuracy: 0.9643 - val_loss: 0.1046 - val_accuracy: 0.9619
Epoch 6/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.0971 - accuracy: 0.9654 - val_loss: 0.1015 - val_accuracy: 0.9643
Epoch 7/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.0968 - accuracy: 0.9652 - val_loss: 0.0989 - val_accuracy: 0.9654
Epoch 8/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.0962 - accuracy: 0.9653 - val_loss: 0.1154 - val_accuracy: 0.9583
Epoch 9/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.0956 - accuracy: 0.9659 - val_loss: 0.0987 - val_accuracy: 0.9644
Epoch 10/10
4847/4847 [==============================] - 8s 2ms/step - loss: 0.0949 - accuracy: 0.9658 - val_loss: 0.0988 - val_accuracy: 0.9658
###Markdown
Note that accuracy will be similar to the XGBoost model. We're just using this to demonstrate how training a model with a different framework could be deployed as a new version.
###Code
metrics = tf_model.evaluate(x_test, y_test)
print(metrics)
###Output
1795/1795 [==============================] - 2s 1ms/step - loss: 0.0954 - accuracy: 0.9661
[0.09541851282119751, 0.9660840034484863]
###Markdown
Next we'll deploy the updated TF model to AI Platform as a v2.
###Code
tf_model_path = 'gs://' + BUCKET + '/tf'
tf_model.save(tf_model_path, save_format='tf')
!gcloud ai-platform versions create 'v2' \
--model 'flight_delay_prediction' \
--origin $tf_model_path \
--runtime-version=2.1 \
--framework 'TENSORFLOW' \
--python-version=3.7
# Make a prediction to the new version
!gcloud ai-platform predict --model 'flight_delay_prediction' --version \
'v2' --json-instances 'input.json'
###Output
_____no_output_____
###Markdown
Alternative: reframe as a regression problemIn this case, you'd likely want to create a new model resource since the response format of your model has changed.
###Code
regression_model_labels = df['arr_delay']
regression_model_data = df.drop(columns=['arr_delay', 'arr_delay_bool'])
x,y = regression_model_data,regression_model_labels
x_train,x_test,y_train,y_test = train_test_split(x,y)
model = xgb.XGBRegressor(
objective='reg:linear'
)
# This will take 1-2 minutes to run
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
for i,val in enumerate(y_pred[:10]):
print(val)
print(y_test.iloc[i])
print()
model.save_model('model.bst')
!gsutil cp model.bst gs://$BUCKET/regression/
!gcloud ai-platform models create 'flights_regression'
# Create the version
!gcloud ai-platform versions create 'v1' \
--model 'flights_regression' \
--origin gs://$BUCKET/regression \
--runtime-version=1.15 \
--framework 'XGBOOST' \
--python-version=3.7
!gcloud ai-platform predict --model 'flights_regression' --version \
'v1' --json-instances 'input.json'
###Output
_____no_output_____ |
notebooks/labor_search.ipynb | ###Markdown
Tutorial 5: Labor searchThe most-requested feature of SSJ toolkit v0.1 has been support for time-varying transition matrices for exogenous states in HA blocks. This is needed, for example, for models in which the probability of becoming unemployed varies with the business cycle. This feature is supported by HetBlocks in 1.0.0 and newer versions of the toolkit. In this notebook, we show how to implement it.
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
1 Model setupConsider a simple incomplete markets model with unemployment risk. The state variables are - employment status $s \in \{E, U\}$- labor productivity $e \in \{e_1, \dots, e_m\}$- liquid assets $a \in [\underline{a}, \infty)$ Employment status matters for income. Employed workers earn wage $w_t e$, while unemployed workers receive benefits $b e.$ Let $y_t(s, e)$ denote this non-financial income.From the workers' perspective, employment status evolves exogenously. An employed worker may lose her job with probability $\sigma_t$. An unemployed worker may find a job with probability $f_t.$ Let $\Pi_t^s$ denote the Markov matrix associated with state $s$. Of course, these transition probabilities may be determined endogenously outside the HetBlock, e.g. in a search and matching block.The Bellman equation is$$\begin{align*}V_t(s, e, a_{-}) = \max_{c, a} &\left\{\frac{c^{1-\sigma}}{1-\sigma} + \beta \mathbb{E}_t\left[V_{t+1}(s', e', a)|s, e \right] \right\}\\c + a &= (1 + r_t)a_{-} + y_t(s, e)\\a &\geq \underline{a}\end{align*}$$ 2 Set up HA blockStarting in version 1.0.0, HetBlocks support an arbitrary number of discrete exogenous state variables. The only restriction is on timing. We assume that when agents choose their endogenous state(s) (in this case assets $a$), all discrete shocks have already been resolved. The Bellman equation reflects this timing assumption. The relevant state is `(s, e, a_{-})`, i.e. `(s, e)` are from the current period, while `a_{-}` is from last period.Some specific assumption on timing is necessary for the HetBlock to know how to take expectations with respect to exogenous states (backward iteration) and how to construct the law of motion for the distribution (forward iteration). The one we chose (discrete shocks before decisions) is the most common in macro models.In this paradigm, the HA problem we're dealing with is just a special case of the SIM model that we used in the Krusell-Smith model. Compared to `sequence_jacobian/examples/hetblocks/household_sim`, we just need to make 3 small changes.1. Make sure that numpy expressions work for 3-dimensional arrays (state space is `(s, e, a)` and not `(e, a)`).2. Provide names of `exogenous` Markov matrices in *chronological order*.3. Write a hetinput function that maps job-finding and separation rates into the Markov matrix $\Pi_t^s$.Once this is done, everything will work as expected. We can shock and compute Jacobians with respect to $f_t$ and so on. Step 1: hetinputs
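For reference before the code (an added note, following directly from the transition probabilities above): the employment-status Markov matrix that the `search_frictions` hetinput constructs below is$$\Pi_t^s = \begin{pmatrix} 1-\sigma_t & \sigma_t \\ f_t & 1-f_t \end{pmatrix},$$where rows index the current status $(E, U)$, columns index next period's status, $\sigma_t$ is the separation rate (`s` in the code) and $f_t$ is the job-finding rate.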
###Code
from sequence_jacobian import markov_rouwenhorst, agrid
def make_grid(rho_e, sd_e, nE, amin, amax, nA):
e_grid, _, Pi_e = markov_rouwenhorst(rho=rho_e, sigma=sd_e, N=nE)
a_grid = agrid(amin=amin, amax=amax, n=nA)
return e_grid, Pi_e, a_grid
def search_frictions(f, s):
Pi_s = np.vstack(([1 - s, s], [f, 1 - f]))
return Pi_s
def labor_income(e_grid, w, b):
y = np.vstack((w * e_grid, b * e_grid))
return y
###Output
_____no_output_____
###Markdown
Step 2: core HA block
###Code
from sequence_jacobian import het, interpolate_y
def household_init(a_grid, y, r, eis):
c = np.maximum(1e-8, y[..., np.newaxis] + np.maximum(r, 0.04) * a_grid)
Va = (1 + r) * (c ** (-1 / eis))
return Va
@het(exogenous=['Pi_s', 'Pi_e'], policy='a', backward='Va', backward_init=household_init)
def household(Va_p, a_grid, y, r, beta, eis):
c_nextgrid = (beta * Va_p) ** (-eis)
coh = (1 + r) * a_grid + y[..., np.newaxis]
a = interpolate_y(c_nextgrid + a_grid, coh, a_grid) # (x, xq, y)
a = np.maximum(a, a_grid[0])
c = coh - a
uc = c ** (-1 / eis)
Va = (1 + r) * uc
return Va, a, c
###Output
_____no_output_____
###Markdown
Step 4: hetoutputsLet's report the unemployment rate.
###Code
def unemployment(c):
u = np.zeros_like(c)
u[1, ...] = 1.0
return u
###Output
_____no_output_____
###Markdown
Step 4: assemble HA block
###Code
hh = household.add_hetinputs([make_grid, search_frictions, labor_income])
hh = hh.add_hetoutputs([unemployment])
print(hh)
print(f'Inputs: {hh.inputs}')
print(f'Macro outputs: {hh.outputs}')
print(f'Micro outputs: {hh.internals}')
###Output
<HetBlock 'household' with hetinput 'search_frictions_labor_income' and with hetoutput `unemployment'>
Inputs: ['r', 'beta', 'eis', 'rho_e', 'sd_e', 'nE', 'amin', 'amax', 'nA', 'f', 's', 'w', 'b']
Macro outputs: ['A', 'C', 'U']
Micro outputs: ['D', 'Dbeg', 'Pi_s', 'Pi_e', 'Va', 'a', 'c', 'u', 'e_grid', 'a_grid', 'y']
###Markdown
3 Use HA blockLet's consider a baseline calibration and a shock to the separation rate.
###Code
calibration = dict(beta=0.95, r=0.01, eis=0.5, f=0.4, s=0.1, w=1., b=0.5,
rho_e=0.95, sd_e=0.5, nE=5, amin=0., amax=50, nA=100)
ss = hh.steady_state(calibration)
###Output
_____no_output_____
###Markdown
Let's plot the consumption function of a worker with average productivity, comparing employment vs. unemployment.
###Code
plt.plot(ss.internals['household']['a_grid'], ss.internals['household']['c'][0, 3, :], label='employed')
plt.plot(ss.internals['household']['a_grid'], ss.internals['household']['c'][1, 3, :], label='unemployed')
plt.xlabel('Assets')
plt.ylabel('Consumption')
plt.legend()
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Consider the impulse responses of consumption and unemployment to a persistent rise in the separation rate.
###Code
td = hh.impulse_linear(ss, {'s': 0.6 ** np.arange(50)})
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.flatten()
ax[0].plot(td['s'], color='black', label='separation rate')
ax[0].axhline(0, color='gray', linestyle=':')
ax[0].set_title('Shock')
ax[0].set_ylabel('deviation from ss')
ax[0].legend(frameon=False)
ax[1].plot(td['C'], label='consumption')
ax[1].plot(td['U'], label='unemployment rate')
ax[1].axhline(0, color='gray', linestyle=':')
ax[1].legend(frameon=False)
ax[1].set_title('Impulse Responses')
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
homework/HM2/HM2_solution.ipynb | ###Markdown
HM2: Numerical Optimization for Logistic Regression. Name: Vivek Gupta 0. You will do the following:1. Read the lecture note: [click here](https://github.com/wangshusen/DeepLearning/blob/master/LectureNotes/Logistic/paper/logistic.pdf)2. Read, complete, and run my code.3. **Implement mini-batch SGD** and evaluate the performance.4. Convert the .IPYNB file to a .HTML file. * The HTML file must contain **the code** and **the output after execution**. * Missing **the output after execution** will not be graded. 5. Upload this .HTML file to your Google Drive, Dropbox, or your Github repo. (If you submit the file to Google Drive or Dropbox, you must make the file "open-access". The delay caused by "denial of access" may result in a late penalty.)6. Submit the link to this .HTML file to Canvas. * Example: https://github.com/wangshusen/CS583-2020S/blob/master/homework/HM2/HM2.html Grading criteria:1. When computing the ```gradient``` and ```objective function value``` using a batch of samples, use **matrix-vector multiplication** rather than a FOR LOOP of **vector-vector multiplications**.2. Plot ```objective function value``` against ```epochs```. In the plot, compare GD, SGD, and MB-SGD (with $b=8$ and $b=64$). The plot must look reasonable. 1. Data processing- Download the Diabetes dataset from https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/diabetes- Load the data using sklearn.- Preprocess the data. 1.1. Load the data
###Code
from sklearn import datasets
import numpy
x_sparse, y = datasets.load_svmlight_file('diabetes')
x = x_sparse.todense()
print('Shape of x: ' + str(x.shape))
print('Shape of y: ' + str(y.shape))
###Output
Shape of x: (768, 8)
Shape of y: (768,)
###Markdown
1.2. Partition into training and test sets
###Code
# partition the data to training and test sets
n = x.shape[0]
n_train = 640
n_test = n - n_train
rand_indices = numpy.random.permutation(n)
train_indices = rand_indices[0:n_train]
test_indices = rand_indices[n_train:n]
x_train = x[train_indices, :]
x_test = x[test_indices, :]
y_train = y[train_indices].reshape(n_train, 1)
y_test = y[test_indices].reshape(n_test, 1)
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_test: ' + str(x_test.shape))
print('Shape of y_train: ' + str(y_train.shape))
print('Shape of y_test: ' + str(y_test.shape))
###Output
Shape of x_train: (640, 8)
Shape of x_test: (128, 8)
Shape of y_train: (640, 1)
Shape of y_test: (128, 1)
###Markdown
1.3. Feature scaling Use standardization to transform both training and test features
###Code
# Standardization
import numpy
# calculate mu and sig using the training set
d = x_train.shape[1]
mu = numpy.mean(x_train, axis=0).reshape(1, d)
sig = numpy.std(x_train, axis=0).reshape(1, d)
# transform the training features
x_train = (x_train - mu) / (sig + 1E-6)
# transform the test features
x_test = (x_test - mu) / (sig + 1E-6)
print('test mean = ')
print(numpy.mean(x_test, axis=0))
print('test std = ')
print(numpy.std(x_test, axis=0))
###Output
test mean =
[[ 0.12119465 -0.12147522 -0.09482972 0.08344122 0.24990793 0.0348497
-0.00806198 -0.07339132]]
test std =
[[1.21852695 0.94224975 0.97020855 1.0205081 1.39152442 0.98688641
1.03284792 0.94275082]]
###Markdown
1.4. Add a dimension of all ones
###Code
n_train, d = x_train.shape
x_train = numpy.concatenate((x_train, numpy.ones((n_train, 1))), axis=1)
n_test, d = x_test.shape
x_test = numpy.concatenate((x_test, numpy.ones((n_test, 1))), axis=1)
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_test: ' + str(x_test.shape))
###Output
Shape of x_train: (640, 9)
Shape of x_test: (128, 9)
###Markdown
2. Logistic regression modelThe objective function is $Q (w; X, y) = \frac{1}{n} \sum_{i=1}^n \log \Big( 1 + \exp \big( - y_i x_i^T w \big) \Big) + \frac{\lambda}{2} \| w \|_2^2 $.
###Code
# Calculate the objective function value
# Inputs:
# w: d-by-1 matrix
# x: n-by-d matrix
# y: n-by-1 matrix
# lam: scalar, the regularization parameter
# Return:
# objective function value (scalar)
def objective(w, x, y, lam):
n, d = x.shape
yx = numpy.multiply(y, x) # n-by-d matrix
yxw = numpy.dot(yx, w) # n-by-1 matrix
vec1 = numpy.exp(-yxw) # n-by-1 matrix
vec2 = numpy.log(1 + vec1) # n-by-1 matrix
loss = numpy.mean(vec2) # scalar
reg = lam / 2 * numpy.sum(w * w) # scalar
return loss + reg
# initialize w
d = x_train.shape[1]
w = numpy.zeros((d, 1))
# evaluate the objective function value at w
lam = 1E-6
objval0 = objective(w, x_train, y_train, lam)
print('Initial objective function value = ' + str(objval0))
###Output
Initial objective function value = 0.6931471805599453
###Markdown
3. Numerical optimization 3.1. Gradient descent The gradient at $w$ is $g = - \frac{1}{n} \sum_{i=1}^n \frac{y_i x_i }{1 + \exp ( y_i x_i^T w)} + \lambda w$
###Code
# Calculate the gradient
# Inputs:
# w: d-by-1 matrix
# x: n-by-d matrix
# y: n-by-1 matrix
# lam: scalar, the regularization parameter
# Return:
# g: g: d-by-1 matrix, full gradient
def gradient(w, x, y, lam):
n, d = x.shape
yx = numpy.multiply(y, x) # n-by-d matrix
yxw = numpy.dot(yx, w) # n-by-1 matrix
vec1 = numpy.exp(yxw) # n-by-1 matrix
vec2 = numpy.divide(yx, 1+vec1) # n-by-d matrix
vec3 = -numpy.mean(vec2, axis=0).reshape(d, 1) # d-by-1 matrix
g = vec3 + lam * w
return g
# Gradient descent for solving logistic regression
# Inputs:
# x: n-by-d matrix
# y: n-by-1 matrix
# lam: scalar, the regularization parameter
# stepsize: scalar
# max_iter: integer, the maximal iterations
# w: d-by-1 matrix, initialization of w
# Return:
# w: d-by-1 matrix, the solution
# objvals: a record of each iteration's objective value
def grad_descent(x, y, lam, stepsize, max_iter=100, w=None):
n, d = x.shape
objvals = numpy.zeros(max_iter) # store the objective values
if w is None:
w = numpy.zeros((d, 1)) # zero initialization
for t in range(max_iter):
objval = objective(w, x, y, lam)
objvals[t] = objval
print('Objective value at t=' + str(t) + ' is ' + str(objval))
g = gradient(w, x, y, lam)
w -= stepsize * g
return w, objvals
###Output
_____no_output_____
###Markdown
Run gradient descent.
###Code
lam = 1E-6
stepsize = 1.0
w, objvals_gd = grad_descent(x_train, y_train, lam, stepsize)
###Output
Objective value at t=0 is 0.6931471805599453
Objective value at t=1 is 0.5865652561533802
Objective value at t=2 is 0.5443740919412662
Objective value at t=3 is 0.5219834562586072
Objective value at t=4 is 0.5081126529408855
Objective value at t=5 is 0.49876362083550696
Objective value at t=6 is 0.49213001642970056
Objective value at t=7 is 0.4872587483882272
Objective value at t=8 is 0.48359221340915864
Objective value at t=9 is 0.480780095547017
Objective value at t=10 is 0.4785908159337757
Objective value at t=11 is 0.47686539981192033
Objective value at t=12 is 0.47549149511707167
Objective value at t=13 is 0.47438783588880845
Objective value at t=14 is 0.473494508158861
Objective value at t=15 is 0.47276662246583856
Objective value at t=16 is 0.47217007679848266
Objective value at t=17 is 0.47167864894653666
Objective value at t=18 is 0.47127195942416555
Objective value at t=19 is 0.47093401862322454
Objective value at t=20 is 0.470652174299978
Objective value at t=21 is 0.4704163383964725
Objective value at t=22 is 0.4702184119093109
Objective value at t=23 is 0.47005185219614465
Objective value at t=24 is 0.4699113440590905
Objective value at t=25 is 0.46979254733579134
Objective value at t=26 is 0.4696919015093288
Objective value at t=27 is 0.469606473239933
Objective value at t=28 is 0.4695338365074736
Objective value at t=29 is 0.4694719777447263
Objective value at t=30 is 0.4694192202757191
Objective value at t=31 is 0.46937416377849656
Objective value at t=32 is 0.4693356355222552
Objective value at t=33 is 0.4693026508917668
Objective value at t=34 is 0.46927438128171645
Objective value at t=35 is 0.46925012787249437
Objective value at t=36 is 0.46922930012436004
Objective value at t=37 is 0.46921139807555606
Objective value at t=38 is 0.46919599772127846
Objective value at t=39 is 0.4691827388985917
Objective value at t=40 is 0.46917131521784833
Objective value at t=41 is 0.4691614656716837
Objective value at t=42 is 0.4691529676239925
Objective value at t=43 is 0.4691456309378109
Objective value at t=44 is 0.4691392930460323
Objective value at t=45 is 0.46913381480488003
Objective value at t=46 is 0.46912907699899287
Objective value at t=47 is 0.46912497739031067
Objective value at t=48 is 0.46912142822186426
Objective value at t=49 is 0.4691183541029449
Objective value at t=50 is 0.4691156902146739
Objective value at t=51 is 0.46911338078527315
Objective value at t=52 is 0.4691113777927697
Objective value at t=53 is 0.4691096398598246
Objective value at t=54 is 0.46910813131111895
Objective value at t=55 is 0.46910682136849086
Objective value at t=56 is 0.4691056834629679
Objective value at t=57 is 0.46910469464613375
Objective value at t=58 is 0.46910383508601056
Objective value at t=59 is 0.46910308763493763
Objective value at t=60 is 0.4691024374588501
Objective value at t=61 is 0.46910187171897916
Objective value at t=62 is 0.4691013792983498
Objective value at t=63 is 0.46910095056660234
Objective value at t=64 is 0.469100577177627
Objective value at t=65 is 0.469100251895317
Objective value at t=66 is 0.46909996844344
Objective value at t=67 is 0.4690997213762089
Objective value at t=68 is 0.46909950596663735
Objective value at t=69 is 0.46909931811017713
Objective value at t=70 is 0.4690991542415067
Objective value at t=71 is 0.46909901126263615
Objective value at t=72 is 0.46909888648075904
Objective value at t=73 is 0.469098777554506
Objective value at t=74 is 0.46909868244744085
Objective value at t=75 is 0.46909859938780757
Objective value at t=76 is 0.4690985268336705
Objective value at t=77 is 0.4690984634427157
Objective value at t=78 is 0.4690984080460774
Objective value at t=79 is 0.46909835962564483
Objective value at t=80 is 0.46909831729438006
Objective value at t=81 is 0.4690982802792405
Objective value at t=82 is 0.46909824790635546
Objective value at t=83 is 0.46909821958815634
Objective value at t=84 is 0.4690981948121983
Objective value at t=85 is 0.4690981731314469
Objective value at t=86 is 0.46909815415583805
Objective value at t=87 is 0.46909813754493873
Objective value at t=88 is 0.4690981230015651
Objective value at t=89 is 0.4690981102662301
Objective value at t=90 is 0.46909809911231376
Objective value at t=91 is 0.4690980893418571
Objective value at t=92 is 0.4690980807819015
Objective value at t=93 is 0.4690980732813012
Objective value at t=94 is 0.46909806670794485
Objective value at t=95 is 0.46909806094633666
Objective value at t=96 is 0.4690980558954867
Objective value at t=97 is 0.46909805146707234
Objective value at t=98 is 0.4690980475838351
Objective value at t=99 is 0.4690980441781824
###Markdown
3.2. Stochastic gradient descent (SGD)Define $Q_i (w) = \log \Big( 1 + \exp \big( - y_i x_i^T w \big) \Big) + \frac{\lambda}{2} \| w \|_2^2 $.The stochastic gradient at $w$ is $g_i = \frac{\partial Q_i }{ \partial w} = -\frac{y_i x_i }{1 + \exp ( y_i x_i^T w)} + \lambda w$.
###Code
# Calculate the objective Q_i and the gradient of Q_i
# Inputs:
# w: d-by-1 matrix
# xi: 1-by-d matrix
# yi: scalar
# lam: scalar, the regularization parameter
# Return:
# obj: scalar, the objective Q_i
# g: d-by-1 matrix, gradient of Q_i
def stochastic_objective_gradient(w, xi, yi, lam):
    d = xi.shape[1]  # xi is a 1-by-d matrix
yx = yi * xi # 1-by-d matrix
yxw = float(numpy.dot(yx, w)) # scalar
# calculate objective function Q_i
loss = numpy.log(1 + numpy.exp(-yxw)) # scalar
reg = lam / 2 * numpy.sum(w * w) # scalar
obj = loss + reg
# calculate stochastic gradient
g_loss = -yx.T / (1 + numpy.exp(yxw)) # d-by-1 matrix
g = g_loss + lam * w # d-by-1 matrix
return obj, g
# SGD for solving logistic regression
# Inputs:
# x: n-by-d matrix
# y: n-by-1 matrix
# lam: scalar, the regularization parameter
# stepsize: scalar
# max_epoch: integer, the maximal epochs
# w: d-by-1 matrix, initialization of w
# Return:
# w: the solution
# objvals: record of each iteration's objective value
def sgd(x, y, lam, stepsize, max_epoch=100, w=None):
n, d = x.shape
objvals = numpy.zeros(max_epoch) # store the objective values
if w is None:
w = numpy.zeros((d, 1)) # zero initialization
for t in range(max_epoch):
# randomly shuffle the samples
rand_indices = numpy.random.permutation(n)
x_rand = x[rand_indices, :]
y_rand = y[rand_indices, :]
objval = 0 # accumulate the objective values
for i in range(n):
xi = x_rand[i, :] # 1-by-d matrix
yi = float(y_rand[i, :]) # scalar
obj, g = stochastic_objective_gradient(w, xi, yi, lam)
objval += obj
w -= stepsize * g
stepsize *= 0.9 # decrease step size
objval /= n
objvals[t] = objval
print('Objective value at epoch t=' + str(t) + ' is ' + str(objval))
return w, objvals
###Output
_____no_output_____
###Markdown
Run SGD.
###Code
lam = 1E-6
stepsize = 0.1
w, objvals_sgd = sgd(x_train, y_train, lam, stepsize)
###Output
Objective value at epoch t=0 is 0.5153369296192298
Objective value at epoch t=1 is 0.5168320392171399
Objective value at epoch t=2 is 0.5037899610175551
Objective value at epoch t=3 is 0.5026569106045661
Objective value at epoch t=4 is 0.5036955206584983
Objective value at epoch t=5 is 0.49985898401466267
Objective value at epoch t=6 is 0.49242873546684623
Objective value at epoch t=7 is 0.48622332821612535
Objective value at epoch t=8 is 0.4927342157640592
Objective value at epoch t=9 is 0.4862775097759292
Objective value at epoch t=10 is 0.49041582124913174
Objective value at epoch t=11 is 0.48527318675927267
Objective value at epoch t=12 is 0.48433437623252323
Objective value at epoch t=13 is 0.4819671001476988
Objective value at epoch t=14 is 0.48186898070401335
Objective value at epoch t=15 is 0.47964937298073673
Objective value at epoch t=16 is 0.4811225666133671
Objective value at epoch t=17 is 0.47795657260437946
Objective value at epoch t=18 is 0.4780020142518967
Objective value at epoch t=19 is 0.4771513649165403
Objective value at epoch t=20 is 0.4768922285258168
Objective value at epoch t=21 is 0.47547636678684135
Objective value at epoch t=22 is 0.4746570586959945
Objective value at epoch t=23 is 0.47457009995863936
Objective value at epoch t=24 is 0.4736839060215957
Objective value at epoch t=25 is 0.4735893322796746
Objective value at epoch t=26 is 0.4733979595573251
Objective value at epoch t=27 is 0.47276468880706213
Objective value at epoch t=28 is 0.47246112410982305
Objective value at epoch t=29 is 0.4721646206778112
Objective value at epoch t=30 is 0.4719214762217853
Objective value at epoch t=31 is 0.47163110269565467
Objective value at epoch t=32 is 0.4712791361759866
Objective value at epoch t=33 is 0.47116096398903445
Objective value at epoch t=34 is 0.47093220486402554
Objective value at epoch t=35 is 0.4707392586739926
Objective value at epoch t=36 is 0.47055992280407405
Objective value at epoch t=37 is 0.4703904730584859
Objective value at epoch t=38 is 0.4702865165517077
Objective value at epoch t=39 is 0.4702009315842958
Objective value at epoch t=40 is 0.4700749585591805
Objective value at epoch t=41 is 0.46999385378731606
Objective value at epoch t=42 is 0.4698963468487289
Objective value at epoch t=43 is 0.46982078865669685
Objective value at epoch t=44 is 0.4697497648956813
Objective value at epoch t=45 is 0.4696873483986449
Objective value at epoch t=46 is 0.46962807514809135
Objective value at epoch t=47 is 0.4695751962759601
Objective value at epoch t=48 is 0.4695308961617649
Objective value at epoch t=49 is 0.46948348474956436
Objective value at epoch t=50 is 0.46944802926523205
Objective value at epoch t=51 is 0.469413377114555
Objective value at epoch t=52 is 0.4693816590065728
Objective value at epoch t=53 is 0.46935419807795914
Objective value at epoch t=54 is 0.4693279044291339
Objective value at epoch t=55 is 0.4693048900963638
Objective value at epoch t=56 is 0.46928460016444734
Objective value at epoch t=57 is 0.46926668712156144
Objective value at epoch t=58 is 0.4692493375486332
Objective value at epoch t=59 is 0.4692344188759674
Objective value at epoch t=60 is 0.46922107719609335
Objective value at epoch t=61 is 0.4692086337695877
Objective value at epoch t=62 is 0.4691976093766209
Objective value at epoch t=63 is 0.46918767443150233
Objective value at epoch t=64 is 0.4691788340577948
Objective value at epoch t=65 is 0.4691707906630139
Objective value at epoch t=66 is 0.4691635018699262
Objective value at epoch t=67 is 0.46915702451908076
Objective value at epoch t=68 is 0.4691511502035435
Objective value at epoch t=69 is 0.46914588655739
Objective value at epoch t=70 is 0.4691410193300724
Objective value at epoch t=71 is 0.46913682832819903
Objective value at epoch t=72 is 0.4691329633964771
Objective value at epoch t=73 is 0.46912948629941037
Objective value at epoch t=74 is 0.46912637471946816
Objective value at epoch t=75 is 0.4691235580015939
Objective value at epoch t=76 is 0.4691210249033544
Objective value at epoch t=77 is 0.4691187417750111
Objective value at epoch t=78 is 0.46911669302762526
Objective value at epoch t=79 is 0.46911485458057606
Objective value at epoch t=80 is 0.4691131910253488
Objective value at epoch t=81 is 0.46911169181223056
Objective value at epoch t=82 is 0.46911035296340253
Objective value at epoch t=83 is 0.46910914093480843
Objective value at epoch t=84 is 0.4691080491988099
Objective value at epoch t=85 is 0.4691070701952613
Objective value at epoch t=86 is 0.46910618631027096
Objective value at epoch t=87 is 0.4691053918950609
Objective value at epoch t=88 is 0.4691046769186961
Objective value at epoch t=89 is 0.46910403202245393
Objective value at epoch t=90 is 0.4691034534948063
Objective value at epoch t=91 is 0.4691029322379537
Objective value at epoch t=92 is 0.4691024623946657
Objective value at epoch t=93 is 0.4691020402247424
Objective value at epoch t=94 is 0.46910166001160725
Objective value at epoch t=95 is 0.469101317897551
Objective value at epoch t=96 is 0.46910100994769266
Objective value at epoch t=97 is 0.46910073273409675
Objective value at epoch t=98 is 0.469100483306739
Objective value at epoch t=99 is 0.46910025877833544
###Markdown
4. Compare GD with SGDPlot objective function values against epochs.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(6, 4))
epochs_gd = range(len(objvals_gd))
epochs_sgd = range(len(objvals_sgd))
line0, = plt.plot(epochs_gd, objvals_gd, '--b', linewidth=4)
line1, = plt.plot(epochs_sgd, objvals_sgd, '-r', linewidth=2)
plt.xlabel('Epochs', fontsize=20)
plt.ylabel('Objective Value', fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend([line0, line1], ['GD', 'SGD'], fontsize=20)
plt.tight_layout()
plt.show()
fig.savefig('compare_gd_sgd.pdf', format='pdf', dpi=1200)
###Output
_____no_output_____
###Markdown
5. Prediction
###Code
# Predict class label
# Inputs:
# w: d-by-1 matrix
# X: m-by-d matrix
# Return:
# f: m-by-1 matrix, the predictions
def predict(w, X):
xw = numpy.dot(X, w)
f = numpy.sign(xw)
return f
# evaluate training error
f_train = predict(w, x_train)
diff = numpy.abs(f_train - y_train) / 2
error_train = numpy.mean(diff)
print('Training classification error is ' + str(error_train))
# evaluate test error
f_test = predict(w, x_test)
diff = numpy.abs(f_test - y_test) / 2
error_test = numpy.mean(diff)
print('Test classification error is ' + str(error_test))
###Output
Test classification error is 0.2109375
###Markdown
6. Mini-batch SGD (fill the code) 6.1. Compute the objective $Q_I$ and its gradient using a batch of samplesDefine $Q_I (w) = \frac{1}{b} \sum_{i \in I} \log \Big( 1 + \exp \big( - y_i x_i^T w \big) \Big) + \frac{\lambda}{2} \| w \|_2^2 $, where $I$ is a set containing $b$ indices randomly drawn from $\{ 1, \cdots , n \}$ without replacement.The stochastic gradient at $w$ is $g_I = \frac{\partial Q_I }{ \partial w} = \frac{1}{b} \sum_{i \in I} \frac{- y_i x_i }{1 + \exp ( y_i x_i^T w)} + \lambda w$.
###Code
# Calculate the objective Q_I and the gradient of Q_I
# Inputs:
# w: d-by-1 matrix
# xi: b-by-d matrix
# yi: b-by-1 matrix
# lam: scalar, the regularization parameter
# b: integer, the batch size
# Return:
# obj: scalar, the objective Q_i
# g: d-by-1 matrix, gradient of Q_i
def mb_stochastic_objective_gradient(w, xi, yi, lam, b):
# Fill the function
# Follow the implementation of stochastic_objective_gradient
# Use matrix-vector multiplication; do not use FOR LOOP of vector-vector multiplications
yx = numpy.multiply(yi, xi) # b-by-d matrix
yxw = numpy.dot(yx, w) # b-by-1 matrix
vec1 = numpy.exp(-yxw) # b-by-1 matrix
vec2 = numpy.log(1 + vec1) # b-by-1 matrix
# calculate objective function Q_i
loss = numpy.mean(vec2) # scalar
reg = lam/2 * numpy.sum(w * w) # scalar
obj = loss + reg # scalar
# calculate mini batch stochastic gradient
g_loss = (yx)/(1 + numpy.exp(yxw)) # b-by-d matrix
    g_loss = -numpy.mean(g_loss, axis=0).reshape(-1, 1)  # d-by-1 matrix (avoids relying on a global d)
g = g_loss + lam * w # d-by-1 matrix
return obj, g
###Output
_____no_output_____
###Markdown
6.2. Implement mini-batch SGDHints:1. In every epoch, randomly permute the $n$ samples (just like SGD).2. Each epoch has $\frac{n}{b}$ iterations. In every iteration, use $b$ samples, and compute the gradient and objective using the ``mb_stochastic_objective_gradient`` function. In the next iteration, use the next $b$ samples, and so on.
###Code
# Mini-Batch SGD for solving logistic regression
# Inputs:
# x: n-by-d matrix
# y: n-by-1 matrix
# lam: scalar, the regularization parameter
# b: integer, the batch size
# stepsize: scalar
# max_epoch: integer, the maximal epochs
# w: d-by-1 matrix, initialization of w
# Return:
# w: the solution
# objvals: record of each iteration's objective value
numpy.random.seed(1)
def mb_sgd(x, y, lam, b, stepsize, max_epoch=100, w=None):
# Fill the function
# Follow the implementation of sgd
# Record one objective value per epoch (not per iteration!)
n, d = x.shape
    num_batches = int(numpy.ceil(n / b))  # mini-batches per epoch (counts a final partial batch)
objvals = numpy.zeros(max_epoch) # store the objective values
if w is None:
w = numpy.zeros((d, 1)) # zero initialization
for t in range(max_epoch):
# randomly shuffle the samples
rand_indices = numpy.random.permutation(n)
x_rand = x[rand_indices, :]
y_rand = y[rand_indices, :]
objval = numpy.zeros(num_batches)
for i in range(0, n, b):
            xi = x_rand[i:i+b, :]  # b-by-d matrix
            yi = y_rand[i:i+b, :]  # b-by-1 matrix
obj, g = mb_stochastic_objective_gradient(w, xi, yi, lam, b)
objval[int(i/b)] = obj
w -= stepsize * g
stepsize *= 0.9 # decrease step size
objval = numpy.mean(objval)
objvals[t] = objval
print('Objective value at epoch t=' + str(t) + ' is ' + str(objval))
return w, objvals
###Output
_____no_output_____
###Markdown
6.3. Run MB-SGD
###Code
# MB-SGD with batch size b=8
lam = 1E-6 # do not change
b = 8 # do not change
stepsize = 0.1 # you must tune this parameter
w, objvals_mbsgd8 = mb_sgd(x_train, y_train, lam, b, stepsize)
# MB-SGD with batch size b=64
lam = 1E-6 # do not change
b = 64 # do not change
stepsize = 0.5 # you must tune this parameter
w, objvals_mbsgd64 = mb_sgd(x_train, y_train, lam, b, stepsize)
###Output
Objective value at epoch t=0 is 0.5714872438922091
Objective value at epoch t=1 is 0.4937277396702558
Objective value at epoch t=2 is 0.48018824529121973
Objective value at epoch t=3 is 0.47637987015341743
Objective value at epoch t=4 is 0.4741710265839659
Objective value at epoch t=5 is 0.472909718291885
Objective value at epoch t=6 is 0.4724545263183546
Objective value at epoch t=7 is 0.47146710203383274
Objective value at epoch t=8 is 0.47151989746552425
Objective value at epoch t=9 is 0.4711773937768194
Objective value at epoch t=10 is 0.4713026344856712
Objective value at epoch t=11 is 0.4709160031196376
Objective value at epoch t=12 is 0.47082044449194393
Objective value at epoch t=13 is 0.47053834085181323
Objective value at epoch t=14 is 0.4702875318963658
Objective value at epoch t=15 is 0.47013328987177677
Objective value at epoch t=16 is 0.4698385450515283
Objective value at epoch t=17 is 0.4699470163665511
Objective value at epoch t=18 is 0.4697669178258274
Objective value at epoch t=19 is 0.4697113645451646
Objective value at epoch t=20 is 0.469867364890149
Objective value at epoch t=21 is 0.46949993551038094
Objective value at epoch t=22 is 0.4697057052886695
Objective value at epoch t=23 is 0.4696694298062017
Objective value at epoch t=24 is 0.4694601002301388
Objective value at epoch t=25 is 0.4693833817665851
Objective value at epoch t=26 is 0.4694892686877094
Objective value at epoch t=27 is 0.46937116601813456
Objective value at epoch t=28 is 0.46937278597978604
Objective value at epoch t=29 is 0.4693723975760954
Objective value at epoch t=30 is 0.46926150600889527
Objective value at epoch t=31 is 0.4692908543192198
Objective value at epoch t=32 is 0.4692965141050439
Objective value at epoch t=33 is 0.4693022504188894
Objective value at epoch t=34 is 0.46926883799009567
Objective value at epoch t=35 is 0.46921274825975035
Objective value at epoch t=36 is 0.4692283127256801
Objective value at epoch t=37 is 0.46923340227144267
Objective value at epoch t=38 is 0.4692327819645528
Objective value at epoch t=39 is 0.46918643450367964
Objective value at epoch t=40 is 0.4692194838381643
Objective value at epoch t=41 is 0.46918155757824326
Objective value at epoch t=42 is 0.4692019582613215
Objective value at epoch t=43 is 0.46916502954616
Objective value at epoch t=44 is 0.46917068370070664
Objective value at epoch t=45 is 0.4691585252841177
Objective value at epoch t=46 is 0.46916292862675163
Objective value at epoch t=47 is 0.46915653520199924
Objective value at epoch t=48 is 0.46914604218901595
Objective value at epoch t=49 is 0.4691572847402129
Objective value at epoch t=50 is 0.4691374473543101
Objective value at epoch t=51 is 0.469132918059388
Objective value at epoch t=52 is 0.46914108677628513
Objective value at epoch t=53 is 0.4691395390362814
Objective value at epoch t=54 is 0.4691331394804175
Objective value at epoch t=55 is 0.46912920874782255
Objective value at epoch t=56 is 0.4691351562053045
Objective value at epoch t=57 is 0.4691285813524736
Objective value at epoch t=58 is 0.4691305301522345
Objective value at epoch t=59 is 0.46912588985691206
Objective value at epoch t=60 is 0.4691295806434918
Objective value at epoch t=61 is 0.4691257945875222
Objective value at epoch t=62 is 0.46912570672566706
Objective value at epoch t=63 is 0.46912296333799597
Objective value at epoch t=64 is 0.469124861897518
Objective value at epoch t=65 is 0.4691229513584503
Objective value at epoch t=66 is 0.46912262450629283
Objective value at epoch t=67 is 0.46912219693524565
Objective value at epoch t=68 is 0.4691217805761941
Objective value at epoch t=69 is 0.4691217858659139
Objective value at epoch t=70 is 0.46912079265221596
Objective value at epoch t=71 is 0.4691204063504627
Objective value at epoch t=72 is 0.46912058756893493
Objective value at epoch t=73 is 0.4691204252379226
Objective value at epoch t=74 is 0.46911996413251444
Objective value at epoch t=75 is 0.46912022196846825
Objective value at epoch t=76 is 0.46911950990103585
Objective value at epoch t=77 is 0.4691197005850423
Objective value at epoch t=78 is 0.46911913705522884
Objective value at epoch t=79 is 0.4691190486798787
Objective value at epoch t=80 is 0.46911901641737286
Objective value at epoch t=81 is 0.46911911544731594
Objective value at epoch t=82 is 0.46911885138625503
Objective value at epoch t=83 is 0.4691189082514601
Objective value at epoch t=84 is 0.4691186593279662
Objective value at epoch t=85 is 0.4691184873664228
Objective value at epoch t=86 is 0.4691186373626288
Objective value at epoch t=87 is 0.46911847565107206
Objective value at epoch t=88 is 0.46911860597795013
Objective value at epoch t=89 is 0.46911840159993146
Objective value at epoch t=90 is 0.4691184650924871
Objective value at epoch t=91 is 0.4691183436077779
Objective value at epoch t=92 is 0.46911833435190414
Objective value at epoch t=93 is 0.46911831585093877
Objective value at epoch t=94 is 0.46911824200138785
Objective value at epoch t=95 is 0.46911826233623427
Objective value at epoch t=96 is 0.469118214285866
Objective value at epoch t=97 is 0.46911818973489083
Objective value at epoch t=98 is 0.469118099934599
Objective value at epoch t=99 is 0.46911812733542246
###Markdown
7. Plot and compare GD, SGD, and MB-SGD You are required to compare the following algorithms:- Gradient descent (GD)- SGD- MB-SGD with b=8- MB-SGD with b=64Follow the code in Section 4 to plot ```objective function value``` against ```epochs```. There should be four curves in the plot; each curve corresponds to one algorithm. Hint: Logistic regression with $\ell_2$-norm regularization is a strongly convex optimization problem. All the algorithms will converge to the same solution. **In the end, the ``objective function value`` of the 4 algorithms will be the same. If not the same, your implementation must be wrong. Do NOT submit wrong code and wrong result!**
###Code
# plot the 4 curves:
fig = plt.figure(figsize=(6, 4))
epochs_gd = range(len(objvals_gd))
epochs_sgd = range(len(objvals_sgd))
line0, = plt.plot(epochs_gd, objvals_gd, '--b', linewidth=4)
line1, = plt.plot(epochs_sgd, objvals_sgd, '-r', linewidth=2)
line2, = plt.plot(epochs_sgd, objvals_mbsgd8, '-g', linewidth=2)
line3, = plt.plot(epochs_sgd, objvals_mbsgd64, '-y', linewidth=2)
plt.xlabel('Epochs', fontsize=20)
plt.ylabel('Objective Value', fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend([line0, line1, line2, line3], ['GD', 'SGD', 'MBSGD8', 'MBSGD64'], fontsize=20)
plt.tight_layout()
plt.show()
fig.savefig('compare_gd_sgd_mbsgd8_mbsgd64.pdf', format='pdf', dpi=1200)
###Output
_____no_output_____ |
Titanic Learning from Disaster.ipynb | ###Markdown
Titanic Learning from DisasterWe would like to predict survival on the Titanic.https://www.kaggle.com/c/titanic
###Code
%matplotlib inline
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Let's Explore the data
###Code
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.shape
train.head()
train.Age.hist()
train.Age.describe()
train[train['Age'] > 60][['Sex', 'Pclass', 'Age', 'Survived']]
###Output
_____no_output_____
###Markdown
Now I'm starting to see a pattern here. Let's see how many females survived.
###Code
females = train[train['Sex'] == 'female']
females_who_survived = females[females['Survived'] == 1]
females_who_survived.shape
males = train[train['Sex'] == 'male']
males_who_survived = males[males['Survived'] == 1]
males_who_survived.shape
###Output
_____no_output_____
###Markdown
Looks like the majority of people who survived are females. Random Forest
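As a quick cross-check (an added aside, not part of the original notebook), the same pattern can be read directly from a pandas groupby; the column names are those of the Kaggle Titanic training set already loaded into `train`:
```python
# Mean of the 0/1 Survived flag per sex = survival rate by sex
train.groupby('Sex')['Survived'].mean()
```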
###Code
import pylab as pl
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import r2_score
test.head()
cols = ['Age', 'Pclass']
notnull_age = train[cols][train['Age'].notnull()]
notnull_survived = train['Survived'][train['Age'].notnull()]
notnull_age.head()
clf = RandomForestClassifier(n_estimators=20, max_features=2, min_samples_split=5)
clf.fit(notnull_age, notnull_survived)
notnull_test = test[cols][test['Age'].notnull()]
clf.predict(notnull_test)
###Output
_____no_output_____ |
courses/Improving Deep Neural Networks Hyperparameter tuning Regularization and Optimization/WEEK 2/Optimization_methods_v1b.ipynb | ###Markdown
Optimization MethodsUntil now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this: **Figure 1** : **Minimizing the cost is like finding the lowest point in a hilly landscape** At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.To get started, run the following code to import the libraries you will need. Updates to Assignment If you were working on a previous version* The current notebook filename is version "Optimization_methods_v1b". * You can find your work in the file directory as version "Optimization methods'.* To see the file directory, click on the Coursera logo at the top left of the notebook. List of Updates* op_utils is now opt_utils_v1a. Assertion statement in `initialize_parameters` is fixed.* opt_utils_v1a: `compute_cost` function now accumulates total cost of the batch without taking the average (average is taken for entire epoch instead).* In `model` function, the total cost per mini-batch is accumulated, and the average of the entire epoch is taken as the average cost. So the plot of the cost function over time is now a smooth downward curve instead of an oscillating curve.* Print statements used to check each function are reformatted, and 'expected output` is reformatted to match the format of the print statements (for easier visual comparisons).
###Code
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
###Output
_____no_output_____
###Markdown
1 - Gradient DescentA simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. **Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
###Code
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)]-learning_rate*grads['dW' + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)]-learning_rate*grads['db' + str(l+1)]
### END CODE HERE ###
return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
###Output
W1 =
[[ 1.63535156 -0.62320365 -0.53718766]
[-1.07799357 0.85639907 -2.29470142]]
b1 =
[[ 1.74604067]
[-0.75184921]]
W2 =
[[ 0.32171798 -0.25467393 1.46902454]
[-2.05617317 -0.31554548 -0.3756023 ]
[ 1.1404819 -1.09976462 -0.1612551 ]]
b2 =
[[-0.88020257]
[ 0.02561572]
[ 0.57539477]]
###Markdown
**Expected Output**:```W1 =[[ 1.63535156 -0.62320365 -0.53718766] [-1.07799357 0.85639907 -2.29470142]]b1 =[[ 1.74604067] [-0.75184921]]W2 =[[ 0.32171798 -0.25467393 1.46902454] [-2.05617317 -0.31554548 -0.3756023 ] [ 1.1404819 -1.09976462 -0.1612551 ]]b2 =[[-0.88020257] [ 0.02561572] [ 0.57539477]]``` A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent. - **(Batch) Gradient Descent**:``` pythonX = data_inputY = labelsparameters = initialize_parameters(layers_dims)for i in range(0, num_iterations): Forward propagation a, caches = forward_propagation(X, parameters) Compute cost. cost += compute_cost(a, Y) Backward propagation. grads = backward_propagation(a, caches, parameters) Update parameters. parameters = update_parameters(parameters, grads) ```- **Stochastic Gradient Descent**:```pythonX = data_inputY = labelsparameters = initialize_parameters(layers_dims)for i in range(0, num_iterations): for j in range(0, m): Forward propagation a, caches = forward_propagation(X[:,j], parameters) Compute cost cost += compute_cost(a, Y[:,j]) Backward propagation grads = backward_propagation(a, caches, parameters) Update parameters. parameters = update_parameters(parameters, grads)``` In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this: **Figure 1** : **SGD vs GD** "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). **Note** also that implementing SGD requires 3 for-loops in total:1. Over the number of iterations2. Over the $m$ training examples3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)In practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples. **Figure 2** : **SGD vs Mini-Batch GD** "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. **What you should remember**:- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.- You have to tune a learning rate hyperparameter $\alpha$.- With a well-turned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large). 2 - Mini-Batch Gradient descentLet's learn how to build mini-batches from the training set (X, Y).There are two steps:- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. 
Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches. - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this: **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:```pythonfirst_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]...```Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represents $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m-mini_\_batch_\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$).
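As a quick sanity check on that arithmetic, here is a small illustration (not part of the graded code; $m = 148$ is simply the size used by the test case further down):

```python
import math

m, mini_batch_size = 148, 64
num_complete = math.floor(m / mini_batch_size)   # 2 full mini-batches of 64
last_size = m - mini_batch_size * num_complete   # 148 - 128 = 20 examples left over
print(num_complete, last_size)                   # -> 2 20
```

This matches the shapes printed below: two mini-batches of 64 examples and a final, smaller one of 20.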
###Code
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k*mini_batch_size :(k+1)* mini_batch_size]
mini_batch_Y = shuffled_Y[:, k*mini_batch_size :(k+1)* mini_batch_size]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:,num_complete_minibatches*mini_batch_size:]
mini_batch_Y = shuffled_Y[:,num_complete_minibatches*mini_batch_size:]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
###Output
shape of the 1st mini_batch_X: (12288, 64)
shape of the 2nd mini_batch_X: (12288, 64)
shape of the 3rd mini_batch_X: (12288, 20)
shape of the 1st mini_batch_Y: (1, 64)
shape of the 2nd mini_batch_Y: (1, 64)
shape of the 3rd mini_batch_Y: (1, 20)
mini batch sanity check: [ 0.90085595 -0.7612069 0.2344157 ]
###Markdown
**Expected Output**: **shape of the 1st mini_batch_X** (12288, 64) **shape of the 2nd mini_batch_X** (12288, 64) **shape of the 3rd mini_batch_X** (12288, 20) **shape of the 1st mini_batch_Y** (1, 64) **shape of the 2nd mini_batch_Y** (1, 64) **shape of the 3rd mini_batch_Y** (1, 20) **mini batch sanity check** [ 0.90085595 -0.7612069 0.2344157 ] **What you should remember**:- Shuffling and Partitioning are the two steps required to build mini-batches- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128. 3 - MomentumBecause mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations. Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill. **Figure 3**: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$. **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:for $l =1,...,L$:```pythonv["dW" + str(l+1)] = ... (numpy array of zeros with the same shape as parameters["W" + str(l+1)])v["db" + str(l+1)] = ... (numpy array of zeros with the same shape as parameters["b" + str(l+1)])```**Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
###Code
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
v["db" + str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])
### END CODE HERE ###
return v
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
###Output
v["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db1"] =
[[ 0.]
[ 0.]]
v["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
###Markdown
**Expected Output**:```v["dW1"] =[[ 0. 0. 0.] [ 0. 0. 0.]]v["db1"] =[[ 0.] [ 0.]]v["dW2"] =[[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]]v["db2"] =[[ 0.] [ 0.] [ 0.]]``` **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$: $$ \begin{cases}v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}\end{cases}\tag{3}$$$$\begin{cases}v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}} \end{cases}\tag{4}$$where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
###Code
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
v -- python dictionary containing your updated velocities
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l+1)] = beta*v['dW'+str(l+1)]+(1-beta)*grads['dW'+str(l+1)]
v["db" + str(l+1)] = beta*v['db'+str(l+1)]+(1-beta)*grads['db'+str(l+1)]
# update parameters
parameters["W" + str(l+1)] = parameters["W" + str(l+1)]-learning_rate*v['dW'+str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)]-learning_rate*v['db'+str(l+1)]
### END CODE HERE ###
return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = v" + str(v["db2"]))
###Output
W1 =
[[ 1.62544598 -0.61290114 -0.52907334]
[-1.07347112 0.86450677 -2.30085497]]
b1 =
[[ 1.74493465]
[-0.76027113]]
W2 =
[[ 0.31930698 -0.24990073 1.4627996 ]
[-2.05974396 -0.32173003 -0.38320915]
[ 1.13444069 -1.0998786 -0.1713109 ]]
b2 =
[[-0.87809283]
[ 0.04055394]
[ 0.58207317]]
v["dW1"] =
[[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]]
v["db1"] =
[[-0.01228902]
[-0.09357694]]
v["dW2"] =
[[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]]
v["db2"] = v[[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]
###Markdown
**Expected Output**:```W1 = [[ 1.62544598 -0.61290114 -0.52907334] [-1.07347112 0.86450677 -2.30085497]]b1 = [[ 1.74493465] [-0.76027113]]W2 = [[ 0.31930698 -0.24990073 1.4627996 ] [-2.05974396 -0.32173003 -0.38320915] [ 1.13444069 -1.0998786 -0.1713109 ]]b2 = [[-0.87809283] [ 0.04055394] [ 0.58207317]]v["dW1"] = [[-0.11006192 0.11447237 0.09015907] [ 0.05024943 0.09008559 -0.06837279]]v["db1"] = [[-0.01228902] [-0.09357694]]v["dW2"] = [[-0.02678881 0.05303555 -0.06916608] [-0.03967535 -0.06871727 -0.08452056] [-0.06712461 -0.00126646 -0.11173103]]v["db2"] = v[[ 0.02344157] [ 0.16598022] [ 0.07420442]]``` **Note** that:- The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.- If $\beta = 0$, then this just becomes standard gradient descent without momentum. **How do you choose $\beta$?**- The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much. - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default. - Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$. **What you should remember**:- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$. 4 - AdamAdam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum. **How does Adam work?**1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction). 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction). 3. It updates parameters in a direction based on combining information from "1" and "2".The update rule is, for $l = 1, ..., L$: $$\begin{cases}v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}\end{cases}$$where:- t counts the number of steps taken of Adam - L is the number of layers- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages. - $\alpha$ is the learning rate- $\varepsilon$ is a very small number to avoid dividing by zeroAs usual, we will store all parameters in the `parameters` dictionary **Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:for $l = 1, ..., L$:```pythonv["dW" + str(l+1)] = ... 
(numpy array of zeros with the same shape as parameters["W" + str(l+1)])v["db" + str(l+1)] = ... (numpy array of zeros with the same shape as parameters["b" + str(l+1)])s["dW" + str(l+1)] = ... (numpy array of zeros with the same shape as parameters["W" + str(l+1)])s["db" + str(l+1)] = ... (numpy array of zeros with the same shape as parameters["b" + str(l+1)])```
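One detail worth seeing with concrete numbers is the bias correction $\frac{v}{1-(\beta_1)^t}$: because $v$ starts at zero, the uncorrected average badly underestimates the gradient on the first few steps. A tiny illustration with a made-up constant gradient (not graded code):

```python
beta1, grad = 0.9, 1.0
v = 0.0
for t in range(1, 4):
    v = beta1 * v + (1 - beta1) * grad     # raw exponentially weighted average
    v_corrected = v / (1 - beta1 ** t)     # bias-corrected estimate
    print(t, round(v, 3), round(v_corrected, 3))
# t=1: v=0.100 -> corrected 1.0
# t=2: v=0.190 -> corrected 1.0
# t=3: v=0.271 -> corrected 1.0
```

With a constant gradient, the corrected estimate recovers the true magnitude immediately, while the raw $v$ only approaches it as $t$ grows.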
###Code
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
v["db" + str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])
s["dW" + str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
s["db" + str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])
### END CODE HERE ###
return v, s
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
###Output
v["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db1"] =
[[ 0.]
[ 0.]]
v["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
v["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
s["dW1"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]]
s["db1"] =
[[ 0.]
[ 0.]]
s["dW2"] =
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
s["db2"] =
[[ 0.]
[ 0.]
[ 0.]]
###Markdown
**Expected Output**:```v["dW1"] = [[ 0. 0. 0.] [ 0. 0. 0.]]v["db1"] = [[ 0.] [ 0.]]v["dW2"] = [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]]v["db2"] = [[ 0.] [ 0.] [ 0.]]s["dW1"] = [[ 0. 0. 0.] [ 0. 0. 0.]]s["db1"] = [[ 0.] [ 0.]]s["dW2"] = [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]]s["db2"] = [[ 0.] [ 0.] [ 0.]]``` **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$: $$\begin{cases}v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}\end{cases}$$**Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
###Code
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = beta1*v['dW'+str(l+1)]+(1-beta1)*grads['dW'+str(l+1)]
v["db" + str(l+1)] = beta1*v['db'+str(l+1)]+(1-beta1)*grads['db'+str(l+1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l+1)] = v['dW'+str(l+1)]/(1-np.power(beta1,t))
v_corrected["db" + str(l+1)] = v['db'+str(l+1)]/(1-np.power(beta1,t))
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l+1)] = beta2*s['dW'+str(l+1)]+(1-beta2)*np.power(grads['dW'+str(l+1)],2)
s["db" + str(l+1)] = beta2*s['db'+str(l+1)]+(1-beta2)*np.power(grads['db'+str(l+1)],2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l+1)] = s['dW'+str(l+1)]/(1-np.power(beta2,t))
s_corrected["db" + str(l+1)] = s['db'+str(l+1)]/(1-np.power(beta2,t))
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters['W'+str(l+1)]-learning_rate*v_corrected['dW'+str(l+1)]/np.sqrt(s_corrected['dW'+str(l+1)]+epsilon)
parameters["b" + str(l+1)] = parameters['b'+str(l+1)]-learning_rate*v_corrected['db'+str(l+1)]/np.sqrt(s_corrected['db'+str(l+1)]+epsilon)
### END CODE HERE ###
return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
###Output
W1 =
[[ 1.63178673 -0.61919778 -0.53561312]
[-1.08040999 0.85796626 -2.29409733]]
b1 =
[[ 1.75225313]
[-0.75376553]]
W2 =
[[ 0.32648046 -0.25681174 1.46954931]
[-2.05269934 -0.31497584 -0.37661299]
[ 1.14121081 -1.09245036 -0.16498684]]
b2 =
[[-0.88529978]
[ 0.03477238]
[ 0.57537385]]
v["dW1"] =
[[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]]
v["db1"] =
[[-0.01228902]
[-0.09357694]]
v["dW2"] =
[[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]]
v["db2"] =
[[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]
s["dW1"] =
[[ 0.00121136 0.00131039 0.00081287]
[ 0.0002525 0.00081154 0.00046748]]
s["db1"] =
[[ 1.51020075e-05]
[ 8.75664434e-04]]
s["dW2"] =
[[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
[ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
[ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]
s["db2"] =
[[ 5.49507194e-05]
[ 2.75494327e-03]
[ 5.50629536e-04]]
###Markdown
**Expected Output**:```W1 = [[ 1.63178673 -0.61919778 -0.53561312] [-1.08040999 0.85796626 -2.29409733]]b1 = [[ 1.75225313] [-0.75376553]]W2 = [[ 0.32648046 -0.25681174 1.46954931] [-2.05269934 -0.31497584 -0.37661299] [ 1.14121081 -1.09245036 -0.16498684]]b2 = [[-0.88529978] [ 0.03477238] [ 0.57537385]]v["dW1"] = [[-0.11006192 0.11447237 0.09015907] [ 0.05024943 0.09008559 -0.06837279]]v["db1"] = [[-0.01228902] [-0.09357694]]v["dW2"] = [[-0.02678881 0.05303555 -0.06916608] [-0.03967535 -0.06871727 -0.08452056] [-0.06712461 -0.00126646 -0.11173103]]v["db2"] = [[ 0.02344157] [ 0.16598022] [ 0.07420442]]s["dW1"] = [[ 0.00121136 0.00131039 0.00081287] [ 0.0002525 0.00081154 0.00046748]]s["db1"] = [[ 1.51020075e-05] [ 8.75664434e-04]]s["dW2"] = [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04] [ 1.57413361e-04 4.72206320e-04 7.14372576e-04] [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]s["db2"] = [[ 5.49507194e-05] [ 2.75494327e-03] [ 5.50629536e-04]]``` You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference. 5 - Model with different optimization algorithmsLets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
###Code
train_X, train_Y = load_dataset()
###Output
_____no_output_____
###Markdown
We have already implemented a 3-layer neural network. You will train it with: - Mini-batch **Gradient Descent**: it will call your function: - `update_parameters_with_gd()`- Mini-batch **Momentum**: it will call your functions: - `initialize_velocity()` and `update_parameters_with_momentum()`- Mini-batch **Adam**: it will call your functions: - `initialize_adam()` and `update_parameters_with_adam()`
###Code
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
"""
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
m = X.shape[1] # number of training examples
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
cost_total = 0
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost and add to the cost total
cost_total += compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
cost_avg = cost_total / m
# Print the cost every 1000 epoch
if print_cost and i % 1000 == 0:
print ("Cost after epoch %i: %f" %(i, cost_avg))
if print_cost and i % 100 == 0:
costs.append(cost_avg)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now run this 3 layer neural network with each of the 3 optimization methods.

5.1 - Mini-batch Gradient descent

Run the following code to see how the model does with mini-batch gradient descent.
###Code
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
Cost after epoch 0: 0.702405
Cost after epoch 1000: 0.668101
Cost after epoch 2000: 0.635288
Cost after epoch 3000: 0.600491
Cost after epoch 4000: 0.573367
Cost after epoch 5000: 0.551977
Cost after epoch 6000: 0.532370
Cost after epoch 7000: 0.514007
Cost after epoch 8000: 0.496472
Cost after epoch 9000: 0.468014
###Markdown
5.2 - Mini-batch gradient descent with momentum

Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
###Code
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
Cost after epoch 0: 0.702413
Cost after epoch 1000: 0.668167
Cost after epoch 2000: 0.635388
Cost after epoch 3000: 0.600591
Cost after epoch 4000: 0.573444
Cost after epoch 5000: 0.552058
Cost after epoch 6000: 0.532458
Cost after epoch 7000: 0.514101
Cost after epoch 8000: 0.496652
Cost after epoch 9000: 0.468160
###Markdown
5.3 - Mini-batch with Adam mode

Run the following code to see how the model does with Adam.
###Code
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
Cost after epoch 0: 0.702166
Cost after epoch 1000: 0.167966
Cost after epoch 2000: 0.141320
Cost after epoch 3000: 0.138782
Cost after epoch 4000: 0.136111
Cost after epoch 5000: 0.134327
Cost after epoch 6000: 0.131147
Cost after epoch 7000: 0.130245
Cost after epoch 8000: 0.129655
Cost after epoch 9000: 0.129159
|
ai1/labs/AI1_05.ipynb | ###Markdown
05 Logistic Regression
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import pandas as pd
import numpy as np
from seaborn import lmplot, stripplot
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
###Output
_____no_output_____
###Markdown
Introductory Remarks We're going to predict whether someone is over the drink-drive limit or not. This is classification, not regression. We have a dataset collected by some friends of mine. TBH it is too small to do anything reliable. We use logistic regression, not linear regression, and we measure accuracy, not mean absolute error. This notebook is incomplete. Your job is to complete it. Read in and Check the Data
###Code
# Use pandas to read the CSV file into a DataFrame
df = pd.read_csv("../datasets/dataset_alcohol.csv")
# Shuffle the dataset
df = df.sample(frac=1, random_state=2)
df.reset_index(drop=True, inplace=True)
df.shape
df.columns
features = ['age_yrs', 'height_cm', 'weight_kg', 'duration_mins', 'elapsed_mins', 'sex', 'last_meal', 'units']
###Output
_____no_output_____
###Markdown
It's your job now to inspect the data. Determine whether there is any cleaning needed. Create a Test Set
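For the inspection step mentioned above, here is a minimal sketch of the kind of checks you might run (suggestions only, not a prescribed solution):

```python
df.info()                         # dtypes and non-null counts
df.describe()                     # ranges of the numeric features
df.isnull().sum()                 # any missing values to clean?
df["over_limit"].value_counts()   # how balanced are the two classes?
df["sex"].value_counts()          # are the category labels consistent?
```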
###Code
# Split off the test set: 20% of the dataset.
dev_df, test_df = train_test_split(df, train_size=0.8, stratify=df["over_limit"], random_state=2)
# Extract the features but leave as a DataFrame
dev_X = dev_df[features]
test_X = test_df[features]
# Target values, encoded and converted to a 1D numpy array
label_encoder = LabelEncoder()
label_encoder.fit(df["over_limit"])
dev_y = label_encoder.transform(dev_df["over_limit"])
test_y = label_encoder.transform(test_df["over_limit"])
###Output
_____no_output_____
###Markdown
Dataset Exploration
###Code
# It can be good to do this on a copy of the dataset (excluding the test set, of course)
copy_df = dev_df.copy()
plot = lmplot(x="units", y="weight_kg", hue="over_limit", data=copy_df, fit_reg=False)
plot = stripplot(x="over_limit", y="elapsed_mins", data=copy_df, jitter=0.2)
# This one only works properly if you've done some preprocessing earlier!
###Output
_____no_output_____ |
tutorials/Solving_Differential_Equations_with_Python_mechanical_vibrations.ipynb | ###Markdown
- - - -
Mechpy Tutorials
a mechanical engineering toolbox

source code - https://github.com/nagordon/mechpy
documentation - https://nagordon.github.io/mechpy/web/
- - - -
Neal Gordon 2017-02-20
- - - -

Solving a second order differential equation symbolically with python
###Code
# setup
from sympy import Eq, pi
import sympy as sp
import matplotlib.pyplot as plt
from numpy import linspace
sp.init_printing(use_latex='mathjax')
get_ipython().magic('matplotlib inline') # inline plotting
t,k,m,c = sp.symbols('t,k,m,c')
x = sp.Function('x') # (t)
k_ = 1e3 # spring constant, kN/m
m_ = 50 # mass, Kg
c_ = 3 # damping coefficient
ode = k*x(t) + m*c*x(t).diff(t,1) + m*x(t).diff(t,2)
Eq(ode)
ode_sol = sp.dsolve(ode)
ode_sol
x0 = 1.0
v0 = 0
bc1 = Eq(ode_sol.lhs.subs(x(t),x0), ode_sol.rhs.subs(t,0))
bc2 = Eq(ode_sol.lhs.subs(x(t),v0), ode_sol.rhs.diff(t).subs(t,0))
C_eq = {bc1,bc2}
C_eq
known_params = {m,c,k,t}
const = ode_sol.free_symbols - known_params
const
Csol = sp.solve(C_eq,const)
Csol
ode_sol = ode_sol.subs(Csol)
ode_sol
ode_sol = ode_sol.subs({m:m_, c:c_, k:k_})
ode_sol
#sp.plot(ode_sol.rhs, (t,0,5)) ;
xfun = sp.lambdify(t,ode_sol.rhs, "numpy")
vfun = sp.lambdify(t,sp.diff(ode_sol.rhs), "numpy")
t = linspace(0,5,1000)
fig, ax1 = plt.subplots(figsize=(12,8))
ax2 = ax1.twinx()
ax1.plot(t,xfun(t),'b',label = r'$x (mm)$', linewidth=2.0)
ax2.plot(t,vfun(t),'g--',label = r'$\dot{x} (m/sec)$', linewidth=2.0)
ax2.legend(loc='lower right')
ax1.legend()
ax1.set_xlabel('time , sec')
ax1.set_ylabel('disp (mm)',color='b')
ax2.set_ylabel('velocity (m/s)',color='g')
plt.title('Mass-Spring System with $v_0=%0.1f$' % (v0))
plt.grid()
plt.show()
###Output
C:\Users\neal\Anaconda3\lib\site-packages\numpy\core\numeric.py:474: ComplexWarning: Casting complex values to real discards the imaginary part
return array(a, dtype, copy=False, order=order)
###Markdown
Solving a second order differential equation numerically
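Before handing the problem to `odeint`, it helps to restate what it expects: the same equation as above, $m\ddot{x} + mc\dot{x} + kx = 0$, rewritten as a first-order system in the state vector $[x, \dot{x}]$:

$$\frac{d}{dt}\begin{bmatrix} x \\ \dot{x} \end{bmatrix} = \begin{bmatrix} \dot{x} \\ -\frac{k}{m}x - c\,\dot{x} \end{bmatrix}$$

which is exactly what the `MassSpringDamper` function below returns.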
###Code
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,xlabel,ylabel,title,legend,figure,subplots
from numpy import cos, pi, arange, sqrt, array
get_ipython().magic('matplotlib inline') # inline plotting
def MassSpringDamper(state,t):
'''
k=spring constant, Newtons per metre
m=mass, Kilograms
    c=damping coefficient, Newton*second / meter
for a mass, spring, damper
xdd = -k*x/m -c*xd
'''
k=1e3 # spring constant, kN/m
m=50 # mass, Kg
c=3 # damping coefficient
# unpack the state vector
x,xd = state # displacement,x and velocity x'
# compute acceleration xdd = x''
xdd = -k*x/m -c*xd
return [xd, xdd]
x0 = 1.0
v0 = 0
state0 = [x0, v0] #initial conditions [x0 , v0] [m, m/sec]
ti = 0.0 # initial time
tf = 5.0 # final time
step = 0.001 # step
t = arange(ti, tf, step)
state = odeint(MassSpringDamper, state0, t)
x = array(state[:,[0]])
xd = array(state[:,[1]])
# Plotting displacement and velocity
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 14
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(t,x,'b',label = r'$x (mm)$', linewidth=2.0)
ax2.plot(t,xd,'g--',label = r'$\dot{x} (m/sec)$', linewidth=2.0)
ax2.legend(loc='lower right')
ax1.legend()
ax1.set_xlabel('time , sec')
ax1.set_ylabel('disp (mm)',color='b')
ax2.set_ylabel('velocity (m/s)',color='g')
plt.title('Mass-Spring System with $v_0=%0.1f$' % (v0))
plt.grid()
plt.show()
###Output
_____no_output_____ |
daily_discards.ipynb | ###Markdown
Discard and Sidestream Analysis

Setup
* Install [Jupyter](http://jupyter.org/install)
* Install [gcloud SDK](https://cloud.google.com/sdk/downloads)
  + Authenticate: `gcloud auth login`
  + Set default projects: `gcloud config set project mlab-sandbox`
* Install the google-cloud-bigquery package:
  + `pip install --upgrade google-cloud-bigquery`
* Start Jupyter
  + `jupyter notebook`

References
* Matplotlib - https://matplotlib.org/contents.html
* Pandas - https://pandas.pydata.org/pandas-docs/stable/api.html
* BigQuery - https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators
###Code
# Enables figures to load outside of browser.
%matplotlib
# Enables figures to load inline in the browser.
#%matplotlib inline
import os
import math
import pandas as pd
import numpy as np
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import matplotlib.ticker
import datetime
import collections
# Some matplotlib features are version dependent.
assert(matplotlib.__version__ >= '2.1.2')
# Depends on: pip install --upgrade google-cloud-bigquery
from google.cloud import bigquery
def run_query(query, project='mlab-sandbox'):
#print query
client = bigquery.Client(project=project)
job = client.query(query)
results = collections.defaultdict(list)
for row in job.result(timeout=300):
for key in row.keys():
results[key].append(row.get(key))
return pd.DataFrame(results)
def unlog(x, pos):
v = math.pow(10, x)
frac, whole = math.modf(v)
if frac > 0:
return '%.1f' % v
else:
return '%d' % whole
logFormatter = matplotlib.ticker.FuncFormatter(unlog)
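# Presumably for log10-scaled axes further down: e.g. ax.xaxis.set_major_formatter(logFormatter)
# labels the ticks with their un-logged values.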
###Output
_____no_output_____
###Markdown
Daily Saturation Events @ 500Mbps

If we define "uplink saturation" as a 10s timebin with an average utilization above a given rate, then we can plot the "Percent of Daily timebins" that exceed this threshold over time at various sites in the US using DISCO data.

Note: this does not account for recent 10g site upgrades.
###Code
df_disco = run_query("""
#standardSQL
SELECT
name AS hostname,
FORMAT_TIMESTAMP("%Y-%m-%d", TIMESTAMP_TRUNC(sts, DAY)) AS day,
UNIX_SECONDS(TIMESTAMP_TRUNC(sts, DAY)) AS ts,
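  -- value is uplink bytes sent during one 10-second sample, so value * 8 / 10,000,000
  -- (8 bits per byte, 10 seconds, 1e6 bits per Mbit) is the bin's average rate in Mbps.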
SUM(IF(metric = 'switch.octets.uplink.tx' AND (value * 8 / 10000000) >= 500, 1, 0)) as saturated_events_500,
SUM(IF(metric = 'switch.octets.uplink.tx' AND (value * 8 / 10000000) >= 800, 1, 0)) as saturated_events_800,
  SUM(IF(metric = 'switch.octets.uplink.tx' AND (value * 8 / 10000000) >= 900, 1, 0)) as saturated_events_900
FROM (
SELECT
metric,
REGEXP_EXTRACT(hostname, r'(mlab[1-4].[a-z]{3}[0-9]{2}).*') AS name,
sample.timestamp AS sts,
sample.value AS value
FROM
`mlab-sandbox.base_tables.switch*`,
UNNEST(sample) AS sample
WHERE
metric LIKE 'switch.octets.uplink.tx'
GROUP BY
hostname, metric, sts, value
)
WHERE
name IS NOT NULL
GROUP BY
hostname, day, ts
ORDER BY
hostname, day, ts
""")
len(df_disco)
sites = [
['dfw', 'lga', 'iad'],
['lax', 'atl', 'den'],
['sea', 'nuq', 'ord'], # MIA is lower utilization.
]
cols = len(sites[0])
fig = plt.figure(figsize=(4 * cols, 4 * cols))
axes = [
[None] * cols,
[None] * cols,
[None] * cols,
]
for r, siter in enumerate(sites):
for c, site in enumerate(siter):
axes[r][c] = plt.subplot2grid((3, cols), (r, c))
if c != 0:
pass
else:
axes[r][c].set_ylabel('% Saturated Timebins')
if r != 2:
axes[r][c].set_xticklabels([])
prefix = 'mlab1.' + site
ds_sites = df_disco[ df_disco['hostname'].str.contains(prefix) ]
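        # 8640 = 24 * 60 * 60 / 10 = number of 10-second bins in a day, so this is a percentage of the day.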
for h in sorted(set(ds_sites[ ds_sites['hostname'].str.contains(prefix) ]['hostname'])):
ds = ds_sites[ (ds_sites['hostname'].str.contains(h)) ]
axes[r][c].plot_date(
dates.epoch2num(ds['ts']),
100 * ds['saturated_events_500'] / 8640, ls='-', ms=0, label=h[6:11])
axes[r][c].set_title(site)
axes[r][c].tick_params(axis='x', labelrotation=90)
axes[r][c].grid(color='#dddddd')
axes[r][c].legend(loc=2, fontsize='x-small', ncol=2)
fig.suptitle('Daily Saturation Events in US Sites @ 500 Mbps')
plt.show()
###Output
_____no_output_____
###Markdown
Daily DISCO Uplink Utilization
###Code
# DISCO RATES 90th PERCENTILE
df_disco_max = run_query("""
#standardSQL
SELECT
name AS hostname,
FORMAT_TIMESTAMP("%Y-%m-%d", TIMESTAMP_TRUNC(sts, DAY)) AS day,
UNIX_SECONDS(TIMESTAMP_TRUNC(sts, DAY)) AS ts,
APPROX_QUANTILES(value, 101)[ORDINAL(50)] as bytes_50th,
APPROX_QUANTILES(value, 101)[ORDINAL(90)] as bytes_90th,
APPROX_QUANTILES(value, 101)[ORDINAL(98)] as bytes_98th,
APPROX_QUANTILES(value, 101)[ORDINAL(99)] as bytes_99th,
MAX(value) as bytes_max
FROM (
SELECT
metric,
REGEXP_EXTRACT(hostname, r'(mlab[1-4].[a-z]{3}[0-9]{2}).*') AS name,
sample.timestamp AS sts,
sample.value AS value
FROM
`measurement-lab.base_tables.switch*`,
UNNEST(sample) AS sample
WHERE
metric LIKE 'switch.octets.uplink.tx'
GROUP BY
hostname, metric, sts, value
)
WHERE
name IS NOT NULL
GROUP BY
hostname, day, ts
ORDER BY
hostname, day, ts
""")
###Output
_____no_output_____
###Markdown
Sidestream Connection counts
###Code
df_ss_count = run_query(
"""#standardSQL
CREATE TEMPORARY FUNCTION sliceFromIP(ipaddr STRING)
AS ( MOD(CAST(REGEXP_EXTRACT(ipaddr, r'[:.]([0-9]+)$') AS INT64), 64) - 10 );
SELECT
hostname, ts, count(*) as count
FROM (
SELECT
REGEXP_EXTRACT(test_id, r"\d\d\d\d/\d\d/\d\d/(mlab[1-4].[a-z]{3}[0-9]{2})") AS hostname,
UNIX_SECONDS(TIMESTAMP_TRUNC(log_time, DAY)) AS ts
FROM
-- NOTE: some tables may be reset, removed, or have partial data.
-- `mlab-sandbox.batch.sidestream*`
-- `mlab-sandbox.gfr.sidestream_*`
`measurement-lab.batch.sidestream`
WHERE
REGEXP_CONTAINS(test_id, r"mlab1.(dfw|lga|iad|lax|atl|nuq)[0-9]{2}.*")
--AND sliceFromIP(web100_log_entry.connection_spec.local_ip) = 7
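    -- Keep flows that acked at least ~1 MB and lasted between 9 s and 600 s;
    -- the SndLimTime* counters below are in microseconds.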
AND web100_log_entry.snap.HCThruOctetsAcked >= 1000000 -- 819200
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 600000000
AND (web100_log_entry.snap.State = 1 OR
(web100_log_entry.snap.State >= 5 AND
web100_log_entry.snap.State <= 11))
GROUP BY
hostname, ts, web100_log_entry.connection_spec.remote_ip, web100_log_entry.connection_spec.remote_port, web100_log_entry.connection_spec.local_port, web100_log_entry.connection_spec.local_ip
)
GROUP BY
hostname, ts
ORDER BY
hostname, ts
""")
print len(df_ss_count)
###Output
1031
###Markdown
Daily Uplink Utilization & Sidestream Connection Counts for SK
###Code
# MIA, DEN, and SEA are relatively low utilization.
# NUQ, ORD show trends less dramatic than those below.
# LGA usage appeared to drop dramatically around 2018-01. I think this is a bug, since uplink utilization remains high.
# Highest utilization sites.
sites = [
'dfw', 'iad', 'lax', 'atl'
]
cols = len(sites)
fig = plt.figure(figsize=(4 * cols, 6))
axes = [
[None] * cols,
[None] * cols,
]
for c, site in enumerate(sites):
axes[0][c] = plt.subplot2grid((2, cols), (0, c))
axes[1][c] = plt.subplot2grid((2, cols), (1, c))
prefix = 'mlab1.' + site
r = 0
if c > 0:
# Hide ylabels after the first column.
axes[r][c].set_yticklabels([])
else:
axes[r][c].set_ylabel('Mbps')
# Extract all hostnames that contain the "mlab1.<site>" prefix.
ds_sites = df_disco_max[ df_disco_max['hostname'].str.contains(prefix) ]
for host in sorted(set(ds_sites['hostname'])):
# Plot each host on the current axis.
ds = ds_sites[ (ds_sites['hostname'].str.contains(host)) ]
axes[r][c].plot_date(
dates.epoch2num(ds['ts']),
ds['bytes_90th'] * 8 / 10000000,
ls='-', ms=0, label=host[6:11] + '-90th')
axes[r][c].set_title(site)
axes[r][c].set_ylim(100, 1000)
axes[r][c].set_xticklabels([])
axes[r][c].tick_params(axis='x', labelrotation=90)
axes[r][c].grid(color='#dddddd')
axes[r][c].legend(loc=2, fontsize='x-small', ncol=2)
r = 1
if c > 0:
axes[r][c].set_yticklabels([])
else:
axes[r][c].set_ylabel('Connection Counts')
ds_sites = df_ss_count[ df_ss_count['hostname'].str.contains(prefix) ]
for host in sorted(set(ds_sites['hostname'])):
ds = ds_sites[ (ds_sites['hostname'].str.contains(host)) ]
axes[r][c].plot_date(
dates.epoch2num(ds['ts']),
ds['count'],
ls='-', ms=0, label=host[6:11])
axes[r][c].set_ylim(0, 25000)
axes[r][c].tick_params(axis='x', labelrotation=90)
axes[r][c].grid(color='#dddddd')
axes[r][c].legend(loc=2, fontsize='x-small', ncol=2)
fig.suptitle('Daily 90th Percentile Switch Traffic & TCP Connection Counts Per Metro')
plt.show()
###Output
_____no_output_____
###Markdown
Flow-Control Trial (measurement-lab.public)
###Code
df_ss_trial = run_query("""
#standardSQL
-- Only works for mlab1 addresses. May not work on all machines.
CREATE TEMPORARY FUNCTION sliceFromIP(ipaddr STRING)
AS ( MOD(CAST(REGEXP_EXTRACT(ipaddr, r'[:.]([0-9]+)$') AS INT64), 64) - 10 );
CREATE TEMPORARY FUNCTION betweenTimes(ts INT64, starttime STRING, endtime STRING)
AS ( TIMESTAMP_SECONDS(ts) >= TIMESTAMP(starttime) AND TIMESTAMP_SECONDS(ts) <= TIMESTAMP(endtime) );
SELECT
CASE
WHEN sliceFromIP(web100_log_entry.connection_spec.local_ip) = 1 THEN 'ndt'
WHEN sliceFromIP(web100_log_entry.connection_spec.local_ip) = 7 THEN 'samknows'
WHEN sliceFromIP(web100_log_entry.connection_spec.local_ip) = 9 THEN 'neubot'
ELSE 'other'
END AS slice,
CASE
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-01-26 00:00:00", "2018-01-27 00:00:00") THEN '5w'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-02 00:00:00", "2018-02-03 00:00:00") THEN '4w'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-09 00:00:00", "2018-02-10 00:00:00") THEN '3w'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-16 00:00:00", "2018-02-17 00:00:00") THEN '2w'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-23 00:00:00", "2018-02-24 00:00:00") THEN '1w'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-03-02 00:00:00", "2018-03-03 00:00:00") THEN '0w'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-03-09 00:00:00", "2018-03-10 00:00:00") THEN '+1w'
ELSE 'unknown'
END AS period,
REGEXP_EXTRACT(test_id, r"\d\d\d\d/\d\d/\d\d/(mlab[1-4].[a-z]{3}[0-9]{2})") AS hostname,
web100_log_entry.snap.StartTimeStamp AS ts,
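    -- Throughput: 8 * bytes acked / total send-limited time; the SndLimTime* counters are in
    -- microseconds, so bits per microsecond comes out directly as Mbps.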
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) as rate_mbps
FROM
-- `measurement-lab.public.sidestream`
-- `mlab-sandbox.batch.sidestream*`
`mlab-sandbox.gfr.sidestream_*`
WHERE
( betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-01-26 00:00:00", "2018-01-27 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-02 00:00:00", "2018-02-03 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-09 00:00:00", "2018-02-10 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-16 00:00:00", "2018-02-17 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-23 00:00:00", "2018-02-24 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-03-02 00:00:00", "2018-03-03 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-03-09 00:00:00", "2018-03-10 00:00:00")
)
AND REGEXP_CONTAINS(test_id, r"mlab1.(dfw\d\d)")
AND web100_log_entry.snap.HCThruOctetsAcked >= 1000000 -- 819200
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 600000000
AND (web100_log_entry.snap.State = 1 OR
(web100_log_entry.snap.State >= 5 AND
web100_log_entry.snap.State <= 11))
GROUP BY
hostname, slice, period, ts, rate_mbps
""")
df_ss_trial_pct = run_query("""
CREATE TEMPORARY FUNCTION betweenTimes(ts INT64, starttime STRING, endtime STRING)
AS ( TIMESTAMP_SECONDS(ts) >= TIMESTAMP(starttime) AND TIMESTAMP_SECONDS(ts) <= TIMESTAMP(endtime) );
SELECT
CASE
WHEN betweenTimes(StartTimeStamp, "2018-02-16 00:00:00", "2018-02-17 00:00:00") THEN CONCAT(sitename, '-2w')
WHEN betweenTimes(StartTimeStamp, "2018-02-23 00:00:00", "2018-02-24 00:00:00") THEN CONCAT(sitename, '-1w')
WHEN betweenTimes(StartTimeStamp, "2018-03-02 00:00:00", "2018-03-03 00:00:00") THEN CONCAT(sitename, '-0w (flow)')
ELSE 'unknown'
END AS test_period,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(10)], 2) as q10,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(12)], 2) as q12,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(15)], 2) as q15,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(18)], 2) as q18,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(20)], 2) as q20,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(22)], 2) as q22,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(25)], 2) as q25,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(28)], 2) as q28,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(30)], 2) as q30,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(32)], 2) as q32,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(35)], 2) as q35,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(38)], 2) as q38,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(40)], 2) as q40,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(42)], 2) as q42,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(45)], 2) as q45,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(48)], 2) as q48,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(50)], 2) as q50,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(52)], 2) as q52,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(55)], 2) as q55,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(58)], 2) as q58,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(60)], 2) as q60,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(62)], 2) as q62,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(65)], 2) as q65,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(68)], 2) as q68,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(70)], 2) as q70,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(72)], 2) as q72,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(75)], 2) as q75,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(78)], 2) as q78,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(80)], 2) as q80,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(82)], 2) as q82,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(85)], 2) as q85,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(88)], 2) as q88,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(90)], 2) as q90,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(92)], 2) as q92,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(95)], 2) as q95,
round(APPROX_QUANTILES(rate_mbps, 101) [ORDINAL(98)], 2) as q98
FROM
(
SELECT
UNIX_SECONDS(TIMESTAMP_TRUNC(log_time, DAY)) as StartTimeStamp,
-- web100_log_entry.snap.StartTimeStamp as StartTimeStamp,
REGEXP_EXTRACT(test_id, r"\d\d\d\d/\d\d/[0-9]+/mlab1.(dfw02|lga03)/.*") AS sitename,
8 * (
web100_log_entry.snap.HCThruOctetsAcked / (
web100_log_entry.snap.SndLimTimeRwin + web100_log_entry.snap.SndLimTimeCwnd + web100_log_entry.snap.SndLimTimeSnd)
) AS rate_mbps
FROM
-- `mlab-sandbox.batch.sidestream*`
`mlab-sandbox.gfr.sidestream_*`
WHERE
REGEXP_CONTAINS(test_id, r"\d\d\d\d/\d\d/[0-9]+/mlab1.(dfw02|lga03)/.*")
AND (
betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-16 00:00:00", "2018-02-17 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-02-23 00:00:00", "2018-02-24 00:00:00")
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, "2018-03-02 00:00:00", "2018-03-03 00:00:00"))
AND web100_log_entry.snap.HCThruOctetsAcked >= 819200
AND ( web100_log_entry.snap.SndLimTimeRwin + web100_log_entry.snap.SndLimTimeCwnd + web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND ( web100_log_entry.snap.SndLimTimeRwin + web100_log_entry.snap.SndLimTimeCwnd + web100_log_entry.snap.SndLimTimeSnd) < 600000000
AND ( web100_log_entry.snap.State = 1 OR ( web100_log_entry.snap.State >= 5 AND web100_log_entry.snap.State <= 11))
)
GROUP BY
sitename, test_period
ORDER BY
sitename, test_period
""")
# Transpose the long list of quantiles.
# Save test_period names, so we can name the quantile values after transpose.
test_periods = df_ss_trial_pct['test_period']
n = df_ss_trial_pct.drop(['test_period'], axis=1)
t = n.transpose()
t.columns = test_periods
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(11,6))
# Reformat the percentile column names as integer numbers.
percentiles = [int(v[1:]) for v in list(sorted(n.keys()))]
for period in test_periods:
axes.plot(percentiles, t[period], label=period)
axes.legend(loc=2)
axes.set_ylabel('Mbps')
axes.set_xlabel('Percentiles')
axes.grid(color='#dddddd')
fig.suptitle('Sidestream comparing Flow-control trial to earlier periods')
plt.show()
###Output
_____no_output_____
###Markdown
Historical (mlab-sandbox.batch) - Sidestream by Period & Slice

Variations, for each period:
* all sidestream connections from each period.
* all sidestream connections from each period and slice
* all sidestream connections from each period and slice and from same cohort.
* some sidestream connections from each period and slice and from same cohort, grouped by ts & remote_ip.
* some sidestream connections from each period and slice and from same cohort, grouped only by remote_ip.
###Code
hosts = [
'mlab1.dfw02', 'mlab1.dfw05',
'mlab1.iad01', 'mlab1.iad02', 'mlab1.iad03', 'mlab1.iad04', 'mlab1.iad05',
'mlab1.lax02', 'mlab1.lax03', 'mlab1.lax04', 'mlab1.lax05',
'mlab1.lga02', 'mlab1.lga03', 'mlab1.lga04', 'mlab1.lga05', 'mlab1.lga06',
'mlab1.atl02', 'mlab1.atl03', 'mlab1.atl04', 'mlab1.atl05',
]
hosts = [
'mlab1.dfw02'
]
# (datetime.datetime(2017, 8, 23), datetime.datetime(2017, 8, 28)),
# (datetime.datetime(2017, 8, 28), datetime.datetime(2017, 10, 14)),
periods_list = [
(datetime.datetime(2017, 10, 14), datetime.datetime(2017, 12, 7)),
(datetime.datetime(2017, 12, 7), datetime.datetime(2018, 1, 12)),
(datetime.datetime(2018, 1, 12), datetime.datetime(2018, 1, 21)),
(datetime.datetime(2018, 1, 21), datetime.datetime(2018, 2, 1)),
(datetime.datetime(2018, 2, 1), datetime.datetime(2018, 3, 1)),
(datetime.datetime(2018, 3, 1), datetime.datetime(2018, 3, 10)),
]
df_disco_discards = run_query("""
#standardSQL
SELECT
name AS hostname,
FORMAT_TIMESTAMP("%Y-%m-%d", TIMESTAMP_TRUNC(sts, DAY)) AS day,
UNIX_SECONDS(TIMESTAMP_TRUNC(sts, DAY)) AS ts,
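  -- Fraction of the day's 8640 ten-second bins that recorded at least one uplink discard.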
COUNTIF(metric = 'switch.discards.uplink.tx' AND value > 0) / 8640 AS pct_discards
FROM (
SELECT
metric,
REGEXP_EXTRACT(hostname, r'(mlab[1-4].[a-z]{3}[0-9]{2}).*') AS name,
sample.timestamp AS sts,
sample.value AS value
FROM
`measurement-lab.base_tables.switch*`,
UNNEST(sample) AS sample
WHERE
metric LIKE 'switch.discards.uplink.tx'
GROUP BY
hostname, metric, sts, value
)
WHERE
name IS NOT NULL
GROUP BY
hostname, day, ts
ORDER BY
hostname, day, ts
""")
# STREAMS WITH MATCHING COHORTS
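# For each pair of periods, start_and_end() turns the period start into a 4-day window. The query
# below keeps only remote IPs with more than 10 qualifying SamKnows sidestream tests in BOTH windows
# (the matched cohort), sums the rates of concurrent streams per (timestamp, IP), and then averages
# those sums per IP for each period.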
def start_and_end(d):
s = d.strftime("%Y-%m-%d %H:%M:%S")
e = (d + datetime.timedelta(days=4)).strftime("%Y-%m-%d %H:%M:%S")
return s, e
df_hosts = collections.defaultdict(collections.defaultdict)
for i, periods in enumerate(periods_list):
a_s, a_e = start_and_end(periods[0])
b_s, b_e = start_and_end(periods[1])
for host in hosts:
result = run_query("""
#standardSQL
-- Only works for mlab1 addresses. May not work on all machines.
CREATE TEMPORARY FUNCTION sliceFromIP(ipaddr STRING)
AS ( MOD(CAST(REGEXP_EXTRACT(ipaddr, r'[:.]([0-9]+)$') AS INT64), 64) - 10 );
CREATE TEMPORARY FUNCTION betweenTimes(ts INT64, starttime STRING, endtime STRING)
AS ( TIMESTAMP_SECONDS(ts) >= TIMESTAMP(starttime) AND TIMESTAMP_SECONDS(ts) <= TIMESTAMP(endtime) );
SELECT
slice,
period,
hostname,
remote_ip,
AVG(sum_rate_mbps) as sum_rate_mbps
FROM (
SELECT
slice,
period,
hostname,
remote_ip,
--AVG(rate_mbps) as rate_mbps,
--APPROX_QUANTILES(rate_mbps, 101)[ORDINAL(50)] as med_rate_mbps,
--MAX(rate_mbps) as max_rate_mbps,
SUM(rate_mbps) as sum_rate_mbps
FROM (
SELECT
web100_log_entry.connection_spec.remote_ip as remote_ip,
CASE
WHEN sliceFromIP(web100_log_entry.connection_spec.local_ip) = 1 THEN 'ndt'
WHEN sliceFromIP(web100_log_entry.connection_spec.local_ip) = 7 THEN 'samknows'
WHEN sliceFromIP(web100_log_entry.connection_spec.local_ip) = 9 THEN 'neubot'
ELSE 'other'
END AS slice,
CASE
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, '"""+a_s+"""', '"""+a_e+"""')
THEN '"""+a_s+"""'
WHEN betweenTimes(web100_log_entry.snap.StartTimeStamp, '"""+b_s+"""', '"""+b_e+"""')
THEN '"""+b_s+"""'
ELSE 'bad'
END AS period,
REGEXP_EXTRACT(test_id, r"\d\d\d\d/\d\d/\d\d/(mlab[1-4].[a-z]{3}[0-9]{2})") AS hostname,
web100_log_entry.snap.StartTimeStamp AS ts,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) as rate_mbps
FROM
`mlab-sandbox.batch.sidestream*`
WHERE
(test_id LIKE '%"""+host+"""%')
AND web100_log_entry.snap.StartTimeStamp <= 1521674526
AND ( betweenTimes(web100_log_entry.snap.StartTimeStamp, '"""+a_s+"""', '"""+a_e+"""')
OR betweenTimes(web100_log_entry.snap.StartTimeStamp, '"""+b_s+"""', '"""+b_e+"""'))
AND web100_log_entry.snap.HCThruOctetsAcked >= 1000000 -- 819200
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 600000000
AND (web100_log_entry.snap.State = 1 OR
(web100_log_entry.snap.State >= 5 AND
web100_log_entry.snap.State <= 11))
AND web100_log_entry.connection_spec.remote_ip IN(
(SELECT
remote_ip
FROM (
SELECT
web100_log_entry.connection_spec.remote_ip as remote_ip,
count(*) as c1
FROM
`mlab-sandbox.batch.sidestream*`
WHERE
(test_id LIKE '%"""+host+"""%')
AND web100_log_entry.snap.StartTimeStamp <= 1521674526
AND betweenTimes(web100_log_entry.snap.StartTimeStamp, '"""+a_s+"""', '"""+a_e+"""')
AND sliceFromIP(web100_log_entry.connection_spec.local_ip) = 7
AND web100_log_entry.snap.HCThruOctetsAcked >= 819200
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 600000000
AND (web100_log_entry.snap.State = 1 OR
(web100_log_entry.snap.State >= 5 AND
web100_log_entry.snap.State <= 11))
GROUP BY
remote_ip
HAVING c1 > 10
) INNER JOIN (
SELECT
web100_log_entry.connection_spec.remote_ip as remote_ip,
count(*) as c2
FROM
`mlab-sandbox.batch.sidestream*`
WHERE
(test_id LIKE '%"""+host+"""%')
AND web100_log_entry.snap.StartTimeStamp <= 1521674526
AND betweenTimes(web100_log_entry.snap.StartTimeStamp, '"""+b_s+"""', '"""+b_e+"""')
AND sliceFromIP(web100_log_entry.connection_spec.local_ip) = 7
AND web100_log_entry.snap.HCThruOctetsAcked >= 819200
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 600000000
AND (web100_log_entry.snap.State = 1 OR
(web100_log_entry.snap.State >= 5 AND
web100_log_entry.snap.State <= 11))
GROUP BY
remote_ip
HAVING c2 > 10
) USING (remote_ip))
)
GROUP BY
hostname, slice, period, ts, web100_log_entry.connection_spec.remote_ip, rate_mbps
)
GROUP BY
hostname, slice, period, ts, remote_ip
)
GROUP BY
hostname, slice, period, remote_ip
""")
date_i = (i, a_s, b_s)
df_hosts[host][date_i] = result
print 'saved', i, date_i, host, len(df_hosts[host][date_i])
###Output
WARNING:google.auth._default:No project ID could be determined. Consider running `gcloud config set project` or setting the GOOGLE_CLOUD_PROJECT environment variable
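###Markdown
A note on the discards query above: the 8640 divisor appears to be the number of 10-second switch samples in a day, so `pct_discards` reads as the fraction of sample intervals that saw at least one uplink TX discard. This is an assumption based on the sampling interval, not something stated in the query itself.
###Code
# assumed 10-second switch sampling: 86400 seconds per day / 10 = 8640 samples
print(86400 / 10)
###Output
_____no_output_____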
###Markdown
PDF, CDF, & Switch - by Site and Slice
###Code
title = 'PDF, CDF, Switch - slice sidestream Download Rates'
label2date = {}
slices = ['samknows']
colors = plt.cm.tab10.colors
p2c = {}
c=0
for i, host in enumerate(sorted(df_hosts.keys())):
rows = 4
cols = len(periods_list)
fig = plt.figure(figsize=(4 * cols, 13))
axes = [
[None] * cols,
[None] * cols,
[None] * cols,
None,
]
for p, (x, p_a, p_b) in enumerate(sorted(df_hosts[host])):
axes[0][p] = plt.subplot2grid((rows, cols), (0, p))
axes[1][p] = plt.subplot2grid((rows, cols), (1, p))
axes[2][p] = plt.subplot2grid((rows, cols), (2, p))
for k, slicename in enumerate(slices):
df_ss = df_hosts[host][(x, p_a, p_b)]
if len(df_ss) == 0:
print 'skipping', host, 'no data'
continue
if len(df_ss[ df_ss['hostname'] == host ]) == 0:
print 'skipping', host
continue
a = df_ss[ (df_ss['slice'] == slicename) & (df_ss['period'] == p_a) ]
b = df_ss[ (df_ss['slice'] == slicename) & (df_ss['period'] == p_b) ]
columns = ['hostname', 'remote_ip', 'slice']
ds = pd.merge(a, b, how='left', left_on=columns, right_on=columns)
for period_str in [p_a, p_b]:
if period_str not in p2c:
p2c[period_str] = colors[c]
c += 1
if len(ds['sum_rate_mbps_x'].dropna()) == 0 or len(ds['sum_rate_mbps_y'].dropna()) == 0:
continue
# Top
ax = axes[0][p]
for period, l in [(p_a, ds['sum_rate_mbps_x']), (p_b, ds['sum_rate_mbps_y'])]:
vals = [math.log10(x) for x in l.dropna()]
period_str = period
label = 'pdf-%s-%s (%d)' % (period_str, slicename, len(vals))
label2date[label] = period
sqrt_bins = int(math.sqrt(len(vals)))
n, bins, patches = ax.hist(
vals, sqrt_bins,
histtype='step', normed=1, label=label, ls='-', color=p2c[period_str])
ax.set_axisbelow(True)
ax.legend(fontsize='x-small', loc='upper center', bbox_to_anchor=(0.5, 1.3))
ax.grid(color='#dddddd')
ax.set_title(host)
ax.xaxis.set_major_formatter(logFormatter)
# Middle
ax = axes[1][p]
for period, l in [(p_a, ds['sum_rate_mbps_x']), (p_b, ds['sum_rate_mbps_y'])]:
vals = [math.log10(x) for x in l.dropna()]
period_str = period
label = 'cdf-%s-%s (%d)' % (period_str, slicename, len(vals))
n, bins, patches = ax.hist(
vals, len(vals),
histtype='step', normed=1, cumulative=True, label=label, ls='-', color=p2c[period_str])
ax.xaxis.set_major_formatter(logFormatter)
ax.set_axisbelow(True)
ax.grid(color='#dddddd')
ax.set_title(host)
if p != 0:
ax.set_yticklabels([])
# Scatter.
ax = axes[2][p]
label = 'scatter-%s (%d)/(%d)' % (slicename, len(ds['sum_rate_mbps_x']), len(ds['sum_rate_mbps_y']))
ax.plot([0.1, 1000], [0.1, 1000], color='r', alpha=0.1)
ax.add_patch(
matplotlib.patches.Polygon(
[[.1, .1], [1000, .1], [1000, 1000], [.1, .1]], closed=True,
fill=True, color=p2c[p_b], alpha=0.1))
ax.add_patch(
matplotlib.patches.Polygon(
[[.1, .1], [.1, 1000], [1000, 1000], [.1, .1]], closed=True,
fill=True, color=p2c[p_a], alpha=0.1))
ax.scatter(ds['sum_rate_mbps_y'], ds['sum_rate_mbps_x'], s=2, alpha=0.3, label=label)
ax.set_xlim(.1, 1000)
ax.set_ylim(.1, 1000)
ax.set_xlabel(p_b)
ax.set_ylabel(p_a)
ax.grid(color='#dddddd')
ax.semilogx()
ax.semilogy()
ax.legend(fontsize='x-small')
axes[0][p].set_xlim(math.log10(.1), math.log10(1100))
axes[1][p].set_xlim(math.log10(.1), math.log10(1100))
# Bottom
axes[3] = plt.subplot2grid((rows, cols), (3, 0), colspan=cols)
ax = axes[3]
ds = df_disco_discards[ df_disco_discards['hostname'] == host ]
ax.plot_date(dates.epoch2num(ds['ts']), ds['pct_discards'], ls='-', ms=0, label='switch', color='mediumpurple')
ax.set_title(host)
ax.set_ylim(-0.01, 1)
ax.tick_params(axis='x', labelrotation=90)
ax.grid(color='#dddddd')
# Color switch regions with the PDF periods based on legend colors.
for p in range(0, len(df_hosts[host])):
h, l = axes[0][p].get_legend_handles_labels()
for k, line in enumerate(h):
s = label2date[l[k]]
s = datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
e = s + datetime.timedelta(days=4)
color = h[k].get_edgecolor()
ax.axvspan(dates.date2num(s), dates.date2num(e), alpha=0.5, color=color)
ax.set_ylabel('% discard timebins')
ax2 = ax.twinx()
ds = df_ss_count[ df_ss_count['hostname'] == host ]
ax2.plot_date(dates.epoch2num(ds['ts']), ds['count'], ls='-', ms=0, label='sidestream')
ax2.set_ylabel('Sidestream Flow Count')
ax2.grid(color='#dddddd')
ax.legend(loc=3, fontsize='small')
ax2.legend(loc=1, fontsize='small')
axes[0][0].set_ylabel('PDF')
axes[1][0].set_ylabel('CDF')
fig.suptitle(title)
fig.subplots_adjust(hspace=0.3, wspace=0.4)
plt.show()
###Output
_____no_output_____ |
Book_Recommendation_System.ipynb | ###Markdown
**Collaborative Filtering using k-NN**
###Code
# Merge the ratings with the books on 'ISBN' so each rating row carries its 'bookTitle'
combine_book_rating = pd.merge(ratings, books, on='ISBN')
columns = ['yearOfPublication', 'publisher', 'bookAuthor', 'imageUrlS', 'imageUrlM', 'imageUrlL']
combine_book_rating = combine_book_rating.drop(columns, axis=1)
combine_book_rating.head()
# Drop rows without a title, then count the ratings per book title as 'totalRatingCount'
combine_book_rating = combine_book_rating.dropna(axis=0, subset=['bookTitle'])
book_rating_count = (combine_book_rating.groupby(by=['bookTitle'])['bookRating'].count().reset_index().rename(columns={'bookRating': 'totalRatingCount'})[['bookTitle', 'totalRatingCount']])
book_rating_count.head()
# Merge 'totalRatingCount' with already merged 'combine_book_rating' dataframe
rating_with_totalRatingCount = combine_book_rating.merge(book_rating_count, left_on='bookTitle', right_on='bookTitle', how='left')
rating_with_totalRatingCount.head()
# Keep only books whose 'totalRatingCount' is at least the popularity threshold
popularity_threshold = 50
rating_popular_book = rating_with_totalRatingCount.query('totalRatingCount >= @popularity_threshold')
rating_popular_book.head()
# Comparing books before and after applying popularity threshold
print(rating_with_totalRatingCount.shape)
print(rating_popular_book.shape)
###Output
(488742, 5)
(62149, 5)
###Markdown
Filter for US and Canada users only
###Code
# Get common locations
print(users.Location)
# Filtering users of US and Canada
combined = rating_popular_book.merge(users, left_on='userID', right_on='userID', how='left')
us_canada_user_rating = combined[combined['Location'].str.contains('usa|canada')]
us_canada_user_rating = us_canada_user_rating.drop('Age', axis=1)
us_canada_user_rating.head()
# Getting the csr matrix and pivot dataframe
from scipy.sparse import csr_matrix
us_canada_user_rating = us_canada_user_rating.drop_duplicates(['userID', 'bookTitle'])
us_canada_user_rating_pivot = us_canada_user_rating.pivot(index='bookTitle', columns='userID', values='bookRating').fillna(0.0)
us_canada_user_rating_matrix = csr_matrix(us_canada_user_rating_pivot.values)
us_canada_user_rating_pivot.head()
# Applying KNN with cosine metric and brute algorithm
from sklearn.neighbors import NearestNeighbors
model_knn = NearestNeighbors(metric='cosine', algorithm='brute')
model_knn.fit(us_canada_user_rating_matrix)
###Output
_____no_output_____
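###Markdown
`NearestNeighbors` with `metric='cosine'` ranks books by the angle between their rating vectors rather than by raw rating magnitudes. A minimal sketch of the distance it uses, on made-up vectors (not taken from the dataset):
###Code
import numpy as np

def cosine_distance(u, v):
    # 1 - cosine similarity: 0 means identical rating direction, 1 means orthogonal
    return 1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

# Two hypothetical books rated by the same three users
book_a = np.array([5.0, 0.0, 3.0])
book_b = np.array([4.0, 0.0, 4.0])
print(cosine_distance(book_a, book_b))  # roughly 0.03, i.e. very similar rating patterns
###Output
_____no_output_____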
###Markdown
**Choose a random book from here:**
###Code
# Get a random choice for book whose recommendations are to be found out
query_index = np.random.choice(us_canada_user_rating_pivot.shape[0])
print(query_index)
distances, indices = model_knn.kneighbors(us_canada_user_rating_pivot.iloc[query_index, :].values.reshape(1,-1), n_neighbors=6)
# Print the title of the book
print(us_canada_user_rating_pivot.index[query_index])
###Output
A Map of the World
###Markdown
**Get the book's related recommendations:**
###Code
# Print the recommendations
for i in range(0, len(distances.flatten())):
if i == 0:
print('Recommendations for {0}:\n'.format(us_canada_user_rating_pivot.index[query_index]))
else:
print('{0}: {1}, with a distance of {2}'.format(i, us_canada_user_rating_pivot.index[indices.flatten()[i]], distances.flatten()[i]))
###Output
Recommendations for A Map of the World:
1: How to Make an American Quilt, with a distance of 0.7026849418945244
2: House of Sand and Fog, with a distance of 0.7224659363960837
3: Tara Road, with a distance of 0.7371261247194851
4: The Rapture of Canaan, with a distance of 0.7412075329366015
5: Vinegar Hill (Oprah's Book Club (Paperback)), with a distance of 0.7591511584432995
|
World Bank Population.ipynb | ###Markdown
Loading the Data
Downloading the [population estimate](https://datacatalog.worldbank.org/dataset/population-estimates-and-projections) data set from the World Bank we get a zip archive containing a set of csv files. The main data is in the EstimatesData file. We also load the EstimatesCountry file, which contains information about countries and enables us to filter for "real" countries as opposed to groups of countries.
###Code
df = pd.read_csv("Population-EstimatesData.csv")
df_back = df
df_countries = pd.read_csv("Population-EstimatesCountry.csv")
df_countries_back = df_countries
###Output
_____no_output_____
###Markdown
Understanding the Structure
Looking at the basic data structure we see a somewhat unusual way of storing the data. The World Bank models the data with the time dimension as columns. This is possible since we are dealing with yearly data here, but still unusual. Besides time the data has two more dimensions: Country and Indicator. Both are stored as descriptive text and as a code. I do not know what purpose the "Unnamed: 95" column has. It only contains NaN values, so we are just gonna drop it. We are also gonna drop the "Indicator Code" since it is only helpful if you have a good grasp of the World Bank indicators.
###Code
df.columns
###Output
_____no_output_____
###Markdown
There are 185 different indicators for all the 259 "countries". To make handling the data set a bit easier we will drop some of them. The data set seems to be tuned to enable easy visualisation of relevant data, so it contains a lot of derived indicators.
###Code
df["Indicator Name"].value_counts()
###Output
_____no_output_____
###Markdown
Filtering the Data
We drop "interpolated" indicators which are calculated from other values in the data set. We also drop percentage or rate based indicators since we want to look at actual population numbers. Note that depending on your use case these indicators might be useful.
###Code
df = df[~df["Indicator Name"].str.contains("[Ii]nterpolated")]
df = df[~df["Indicator Name"].str.contains("%")]
df = df[~df["Indicator Name"].str.contains("[Rr]ate")]
df = df[~df["Indicator Name"].str.contains("[Pp]robability")]
df["Indicator Name"].value_counts()
###Output
_____no_output_____
###Markdown
We also filter for "real" countries. Note that the definition of a country is not as intuitive as it seems at first. You can ask [Google](https://www.google.com/search?q=how+many+countries+in+the+world) about it, but even there you will not find a definitive answer. Taiwan is one of the better known countries with a complicated answer. If we look at the countries data set we see that we get quite a bit of information about the countries. We assume that a country must have a currency to filter out groupings of countries. Looking at the entries without a currency this seems to make sense.
###Code
df_countries[df_countries["Currency Unit"].isna()]
###Output
_____no_output_____
###Markdown
Applying this filter we get 217 countries, which seems quite a lot compared to the country numbers we get from Google. We are not gonna investigate further; for our purpose it will do.
###Code
country_list = df_countries[~df_countries["Currency Unit"].isna()]["Country Code"].values
df = df[df["Country Code"].isin(country_list)]
df["Country Code"].nunique()
###Output
_____no_output_____
###Markdown
Transforming the Data
Now we get into the parts where Pandas really shines. The steps before could have been easily done with SQL or Excel. But we want to transform to a more common structure for analysis. First we are finally gonna drop some columns here because they would make the statements a bit longer and we don't need them after all. What we want to achieve is that the year is not a column in our data structure but a value, since it is one obvious dimension for useful visualisations. For this we have to pivot the table and Pandas offers the powerful melt function to do this. For the melt we specify the key columns identifying our measurements ("id_vars") and a name for the new column that will hold the former column names ("var_name"); the old cell values end up in an additional column called "value". Sounds complicated? This operation is easier done than said for once, so we just try it. It is called melt in Pandas. Since for quite a few combinations of country, indicator and year the World Bank has no information, this is also a good occasion to drop these values. Among the reasons for that are that the countries did not exist at that time or they could not provide numbers at the required level of detail.
###Code
df = df.drop("Indicator Code", axis=1)
df = df.drop('Unnamed: 95', axis=1)
df = df.melt(id_vars = ['Country Name', 'Country Code', 'Indicator Name'], var_name = "Year")
df = df.dropna()
df
###Output
_____no_output_____
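###Markdown
To see what melt does in isolation, here is a toy frame shaped like the World Bank layout (id columns plus one column per year; the country and the numbers are made up):
###Code
import pandas as pd

toy = pd.DataFrame({
    "Country Name": ["Utopia"],
    "Indicator Name": ["Population, total"],
    "2000": [1000],
    "2001": [1100],
})
# The year columns become rows; their old cell values land in a "value" column
print(toy.melt(id_vars=["Country Name", "Indicator Name"], var_name="Year"))
###Output
_____no_output_____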
###Markdown
Now this structure is nicely normalized and does not contain unnecessary information. We could base further analysis on that. However, most tools for analysis and visualisation expect a different structure. We define a measurement as an observation of a given country in a given year. All the variables we measure are then stored as columns. In order to get there we have to transform our data again. This time we want to transform the indicators from rows to columns. The goal is to have a set of indicator columns in each row for a given country and year. There is a powerful Pandas function for that called "pivot_table". Again we must give an identifier, in this case called "index". We have to remember that we have kept two columns for the country, its name and its code, so we have to give both. The other part is the year, obviously. Now the values which are to be transformed into columns have to be given as "columns". We want the indicator names as the new columns. The values for these new columns we get from the column called "value" in the current structure. As before it is more difficult to explain it well than to actually do it and look at the result. Normally we would get a nested index in Pandas as a result. This may be useful in specific use cases for easier aggregations. We just want a simple and flat structure, so we reset the index after the operation. Note that we are introducing NaN values again as not all indicators are available in a given year for all countries.
###Code
df = df.pivot_table(index=["Country Name", "Country Code", "Year"], columns="Indicator Name", values="value")
df.reset_index(inplace=True)
df
###Output
_____no_output_____
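###Markdown
The inverse step on similar toy data: pivot_table turns the indicator names back into columns, one row per country and year (again with made-up numbers):
###Code
import pandas as pd

long_form = pd.DataFrame({
    "Country Name": ["Utopia", "Utopia"],
    "Year": ["2000", "2000"],
    "Indicator Name": ["Population, total", "Net migration"],
    "value": [1000, 5],
})
# One row per (country, year), one column per indicator
wide = long_form.pivot_table(index=["Country Name", "Year"], columns="Indicator Name", values="value")
print(wide.reset_index())
###Output
_____no_output_____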
###Markdown
Storing the Result
For further processing we store the result as a CSV file. After resetting the index we just have an artificial counter as index which isn't helpful, so we store the CSV without the index. The resulting structure is well suited for most visualisation tools. Obviously we can also do further analysis on the data frame with Pandas in Python.
###Code
df.to_csv("population_pivot.csv", index=False)
###Output
_____no_output_____ |
03-Crawler_PTT_FittingCurve.ipynb | ###Markdown
0. Install the required libraries
###Code
import sys
!{sys.executable} -m pip install BeautifulSoup4
!{sys.executable} -m pip install lxml
!{sys.executable} -m pip install html5lib
!{sys.executable} -m pip install requests
!{sys.executable} -m pip install matplotlib
###Output
Requirement already satisfied: BeautifulSoup4 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (4.8.0)
Requirement already satisfied: soupsieve>=1.2 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from BeautifulSoup4) (1.9.3)
WARNING: You are using pip version 19.2.3, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Requirement already satisfied: lxml in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (4.4.1)
WARNING: You are using pip version 19.2.3, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Requirement already satisfied: html5lib in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (1.0.1)
Requirement already satisfied: six>=1.9 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from html5lib) (1.12.0)
Requirement already satisfied: webencodings in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from html5lib) (0.5.1)
WARNING: You are using pip version 19.2.3, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Requirement already satisfied: requests in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (2.22.0)
Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from requests) (2019.6.16)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from requests) (1.25.3)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from requests) (3.0.4)
Requirement already satisfied: idna<2.9,>=2.5 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from requests) (2.8)
WARNING: You are using pip version 19.2.3, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Requirement already satisfied: matplotlib in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (3.1.1)
Requirement already satisfied: python-dateutil>=2.1 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from matplotlib) (2.8.0)
Requirement already satisfied: cycler>=0.10 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from matplotlib) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from matplotlib) (1.1.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from matplotlib) (2.4.2)
Requirement already satisfied: numpy>=1.11 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from matplotlib) (1.17.1)
Requirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from python-dateutil>=2.1->matplotlib) (1.12.0)
Requirement already satisfied: setuptools in /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages (from kiwisolver>=1.0.1->matplotlib) (41.2.0)
WARNING: You are using pip version 19.2.3, however version 19.3.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
###Markdown
Trying out the PTT crawler
###Code
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import re
import csv
import time
import requests
from bs4 import BeautifulSoup
PTT_URL = 'https://www.ptt.cc'
target_url = '/bbs/MacShop/index.html'
page_limit = 1
def get_web_page(url):
resp = requests.get(
url=url,
cookies={'over18': '1'}
)
if resp.status_code != 200:
print('Invalid url:', resp.url)
return None
else:
return resp.text
def get_content(dom):
soup = BeautifulSoup(dom, 'html.parser')
divs = soup.find_all('div', id='main-container')
price = "none"
for d in divs:
content=re.split(':| |\n',d.text)
#print(content)
for index in range(len(content)):
if content[index] =="[交易價格]":
pre_price = content[index+1]
price_list = filter(str.isdigit, pre_price)
price = ''.join(list(price_list))
return price
def get_articles(dom, date):
soup = BeautifulSoup(dom, 'html.parser')
    paging_div = soup.find('div', 'btn-group btn-group-paging')  # get the link to the previous page
prev_url = paging_div.find_all('a')[1]['href']
    articles = []  # store the retrieved article data
divs = soup.find_all('div', 'r-ent')
for d in divs:
D_day = d.find('div', 'date').text.strip()
if D_day == date:
            # get the push count
push_count = 0
push_str = d.find('div', 'nrec').text
if push_str:
try:
push_count = int(push_str)
except ValueError:
if push_str == '爆':
push_count = 99
elif push_str.startswith('X'):
push_count = -10
            # get the article link and title
            if d.find('a'):  # a hyperlink means the article exists and has not been deleted
href = d.find('a')['href']
price = get_content(get_web_page(PTT_URL + href))
title = d.find('a').text
author = d.find('div', 'author').text if d.find('div', 'author') else ''
titles = title.split("]")
#print(titles)
if len(titles)==2:
data_content = {
'category':titles[0][1:3],
'title': titles[1][1:],
'href': href,
'push_count': push_count,
'author': author,
'price' : price,
'date' : D_day
}
articles.append(data_content)
print(data_content)
return articles, prev_url
if __name__ == '__main__':
articles,detail = [],[]
page_counter = 0
new_articles = 1
date = time.strftime("%m/%d").lstrip('0')
while new_articles:
page_counter +=1
if new_articles!=1 :
articles += new_articles
current_page = get_web_page(PTT_URL + target_url)
if current_page:
new_articles, target_url = get_articles(current_page, date)
if page_counter>page_limit:
break
with open('ptt.csv','w',newline='') as csvfile:
writer = csv.writer(csvfile)
fieldnames = ['category', 'title', 'href','push_count','author','price','date']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for article in articles:
print(article)
writer.writerow(article)
#for a in articles:
#print(a)
###Output
{'category': '收購', 'title': '雙北 iPhone X/XS 整新', 'href': '/bbs/MacShop/M.1573715814.A.CBD.html', 'push_count': 0, 'author': 's111121', 'price': 'none', 'date': '11/14'}
{'category': '收購', 'title': ' 桃園 iphone 11 顏色皆可', 'href': '/bbs/MacShop/M.1573715894.A.FEF.html', 'push_count': 0, 'author': 'juchii', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': '台北 全新神腦購入 iphone 11 綠色 紫色 ', 'href': '/bbs/MacShop/M.1573716094.A.897.html', 'push_count': 0, 'author': 'mr38', 'price': '23500', 'date': '11/14'}
{'category': '販售', 'title': '彰化 6s plus & 7', 'href': '/bbs/MacShop/M.1573716360.A.3A5.html', 'push_count': 0, 'author': 'tsj47', 'price': '50006500', 'date': '11/14'}
{'category': '販售', 'title': 'Apple Watch series 3 GPS版', 'href': '/bbs/MacShop/M.1573716372.A.C66.html', 'push_count': 0, 'author': 'tt8077919', 'price': '5800', 'date': '11/14'}
{'category': '販售', 'title': '台中 全新 11 pro max 256g 綠 39900', 'href': '/bbs/MacShop/M.1573716576.A.BFF.html', 'push_count': 0, 'author': 'snoopy821218', 'price': '39900', 'date': '11/14'}
{'category': '販售', 'title': '台中 iPhone X 64G 銀色 全新整新機', 'href': '/bbs/MacShop/M.1573716661.A.2F5.html', 'push_count': 0, 'author': 'maccam', 'price': '13000', 'date': '11/14'}
{'category': '販售', 'title': 'MacBook Pro Retina 13” 2018 TB', 'href': '/bbs/MacShop/M.1573716717.A.1BF.html', 'push_count': 0, 'author': 'ben1117', 'price': '45000', 'date': '11/14'}
{'category': '販售', 'title': 'iPhone 11 ProMax256G銀 全新未拆', 'href': '/bbs/MacShop/M.1573716793.A.A47.html', 'push_count': 0, 'author': 'gavin779a', 'price': '40200', 'date': '11/14'}
{'category': '販售', 'title': '台北 全新iPhone 11 64GB 綠', 'href': '/bbs/MacShop/M.1573716940.A.6F2.html', 'push_count': 0, 'author': 'PHrose', 'price': '23000', 'date': '11/14'}
{'category': '販售', 'title': '高雄 iPhone 11 Pro Max 256G 夜幕綠', 'href': '/bbs/MacShop/M.1573712246.A.6CB.html', 'push_count': 0, 'author': 'shangyo1985', 'price': '40500', 'date': '11/14'}
{'category': '販售', 'title': '中部地區 iPhoneX 256G', 'href': '/bbs/MacShop/M.1573712604.A.D3C.html', 'push_count': 0, 'author': 'cashpaggy', 'price': '16000', 'date': '11/14'}
{'category': '販售', 'title': '雙北 iPhone 11 128G 紅 $24500', 'href': '/bbs/MacShop/M.1573712632.A.7D2.html', 'push_count': 0, 'author': 'cchysw3021', 'price': '24500', 'date': '11/14'}
{'category': '販售', 'title': '雙北 現貨 全新 iPhone 11 pro 512 綠', 'href': '/bbs/MacShop/M.1573713238.A.D4A.html', 'push_count': 0, 'author': 'pinkbow2', 'price': '44000', 'date': '11/14'}
{'category': '收購', 'title': '新北airpods2 有線版', 'href': '/bbs/MacShop/M.1573713348.A.26E.html', 'push_count': 0, 'author': 'asd355487', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': '雙北 iPhone 11 Pro 64G 綠', 'href': '/bbs/MacShop/M.1573713588.A.F67.html', 'push_count': 0, 'author': 'd87313', 'price': '32000', 'date': '11/14'}
{'category': '販售', 'title': '嘉義 iPhone 8 64G 玫瑰金', 'href': '/bbs/MacShop/M.1573713677.A.75D.html', 'push_count': 0, 'author': 'BassChuck', 'price': '8000', 'date': '11/14'}
{'category': '收購', 'title': 'ipad air 3 64g 金色 & apple pencil 1', 'href': '/bbs/MacShop/M.1573713709.A.28A.html', 'push_count': 0, 'author': 'terry6203320', 'price': 'none', 'date': '11/14'}
{'category': '交換', 'title': 'iPhone 11 pro max 64 綠 貼換 pro 256綠', 'href': '/bbs/MacShop/M.1573713750.A.0FC.html', 'push_count': 0, 'author': 'pinkbow2', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': '北 Ipad mini 4 LTE 128G 金色', 'href': '/bbs/MacShop/M.1573713768.A.D99.html', 'push_count': 0, 'author': 'lanlimit', 'price': '7700', 'date': '11/14'}
{'category': '販售', 'title': '台中 二手 iPhone X 256g 太空灰 13500', 'href': '/bbs/MacShop/M.1573714044.A.22E.html', 'push_count': 0, 'author': 'maccam', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': '台北 2018 MacBook Pro 15 16/512G 加保', 'href': '/bbs/MacShop/M.1573714069.A.B03.html', 'push_count': 0, 'author': 'tony330857', 'price': '70000', 'date': '11/14'}
{'category': '販售', 'title': 'IPhone 8 Plus 256g 8+', 'href': '/bbs/MacShop/M.1573714339.A.EE0.html', 'push_count': 0, 'author': 'jason241710', 'price': '15000', 'date': '11/14'}
{'category': '販售', 'title': '全國 AirPods 2代 全新未拆 有線', 'href': '/bbs/MacShop/M.1573714350.A.162.html', 'push_count': 0, 'author': 'ga665420', 'price': '3700', 'date': '11/14'}
{'category': '收購', 'title': '台北 airpods pro', 'href': '/bbs/MacShop/M.1573714391.A.F3E.html', 'push_count': 0, 'author': 'cg50904', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': 'iPhone 7 Plus 128g 金色 螢幕電池全新 8500 ', 'href': '/bbs/MacShop/M.1573714684.A.C94.html', 'push_count': 0, 'author': 'maccam', 'price': '8500', 'date': '11/14'}
{'category': '收購', 'title': '台北 我想買 全新 iPhone 8 64GB', 'href': '/bbs/MacShop/M.1573714853.A.99B.html', 'push_count': 0, 'author': 'jason691121', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': 'iphone6金128G', 'href': '/bbs/MacShop/M.1573714928.A.F08.html', 'push_count': 0, 'author': 'walalalulu', 'price': '1500', 'date': '11/14'}
{'category': '販售', 'title': 'iPad Pro 12.9" wifi 全新 整新機 第二代', 'href': '/bbs/MacShop/M.1573715449.A.6D4.html', 'push_count': 0, 'author': 'maccam', 'price': '25000', 'date': '11/14'}
{'category': '販售', 'title': '中壢新竹 iPad 2018 (6th) 32G 金$8500', 'href': '/bbs/MacShop/M.1573715750.A.417.html', 'push_count': 1, 'author': 'ForeverT', 'price': '9000', 'date': '11/14'}
{'category': '收購', 'title': '雙北 iPhone X/XS 整新', 'href': '/bbs/MacShop/M.1573715814.A.CBD.html', 'push_count': 0, 'author': 's111121', 'price': 'none', 'date': '11/14'}
{'category': '收購', 'title': ' 桃園 iphone 11 顏色皆可', 'href': '/bbs/MacShop/M.1573715894.A.FEF.html', 'push_count': 0, 'author': 'juchii', 'price': 'none', 'date': '11/14'}
{'category': '販售', 'title': '台北 全新神腦購入 iphone 11 綠色 紫色 ', 'href': '/bbs/MacShop/M.1573716094.A.897.html', 'push_count': 0, 'author': 'mr38', 'price': '23500', 'date': '11/14'}
{'category': '販售', 'title': '彰化 6s plus & 7', 'href': '/bbs/MacShop/M.1573716360.A.3A5.html', 'push_count': 0, 'author': 'tsj47', 'price': '50006500', 'date': '11/14'}
{'category': '販售', 'title': 'Apple Watch series 3 GPS版', 'href': '/bbs/MacShop/M.1573716372.A.C66.html', 'push_count': 0, 'author': 'tt8077919', 'price': '5800', 'date': '11/14'}
{'category': '販售', 'title': '台中 全新 11 pro max 256g 綠 39900', 'href': '/bbs/MacShop/M.1573716576.A.BFF.html', 'push_count': 0, 'author': 'snoopy821218', 'price': '39900', 'date': '11/14'}
{'category': '販售', 'title': '台中 iPhone X 64G 銀色 全新整新機', 'href': '/bbs/MacShop/M.1573716661.A.2F5.html', 'push_count': 0, 'author': 'maccam', 'price': '13000', 'date': '11/14'}
{'category': '販售', 'title': 'MacBook Pro Retina 13” 2018 TB', 'href': '/bbs/MacShop/M.1573716717.A.1BF.html', 'push_count': 0, 'author': 'ben1117', 'price': '45000', 'date': '11/14'}
{'category': '販售', 'title': 'iPhone 11 ProMax256G銀 全新未拆', 'href': '/bbs/MacShop/M.1573716793.A.A47.html', 'push_count': 0, 'author': 'gavin779a', 'price': '40200', 'date': '11/14'}
{'category': '販售', 'title': '台北 全新iPhone 11 64GB 綠', 'href': '/bbs/MacShop/M.1573716940.A.6F2.html', 'push_count': 0, 'author': 'PHrose', 'price': '23000', 'date': '11/14'}
###Markdown
1. Import the libraries
###Code
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import re
import csv
import time
import requests
import numpy as np
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
from datetime import datetime, date
###Output
_____no_output_____
###Markdown
2. Define variables such as the URL and the data storage location, and create the corresponding CSV file
###Code
PTT_URL = 'https://www.ptt.cc'
target_url = '/bbs/nb-shopping/search?page=1&q=surface%20pro%203'
csv_filename = 'ptt_surface3.csv'
fileexist = os.path.isfile(csv_filename)
header = ['category', 'title', 'href','push_count','author','price','date']
with open(csv_filename,'w',newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
#writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#writer.writeheader()
###Output
_____no_output_____
###Markdown
3. Define a function - check whether the title contains a keyword
###Code
def word_in_string(pre_str, word):
flag = 0
for i in range(len(pre_str)-len(word)+1):
if pre_str[i:i+len(word)]==word:
#print(pre_str[i:i+len(word)], word)
flag=1
break
return flag
###Output
_____no_output_____
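###Markdown
A quick check of the helper above (the sample strings are made up); Python's `in` operator would give the same answer, the function just spells out the sliding comparison:
###Code
# returns 1 when the keyword appears anywhere in the string, 0 otherwise
print(word_in_string('[交易價格]:15000', '交易價格'))   # 1
print(word_in_string('[徵求] surface pro 3', '交易價格'))  # 0
###Output
_____no_output_____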
###Markdown
4. Define a function - digit conversion (for dates)
###Code
def digit_filter_and_convert(pre_str):
new_str = ''
new_int = 1
digit_str='0123456789'
for s in pre_str:
for i in digit_str:
if s==i:
new_str = new_str+i
break
new_int=int(new_str)
return new_int
###Output
_____no_output_____
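###Markdown
A quick sanity check of the digit filter with made-up tokens like the ones the date parser below passes in:
###Code
print(digit_filter_and_convert('2019'))   # 2019
print(digit_filter_and_convert('Nov14'))  # 14 -- non-digit characters are dropped before conversion
###Output
_____no_output_____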
###Markdown
5. Define a function - get the date from the article content
###Code
def get_date_from_content(content_all):
month_lib = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
#post_date = -1
#post_date = date(year = 2020, month = 1, day = 1)
yy,mm,dd,HH,MM,SS=1,1,1,1,1,1
post_datetime = -1
for index in range(len(content_all)-1):
if content_all[index:index+2] =="時間":
time_str = content_all[index+2:index+27]
time_list = re.split(':| |\n',time_str)
while '' in time_list:
time_list.remove('')
#print(time_list)
for m in range(len(month_lib)):
if month_lib[m] == time_list[1]:
mm = m+1
try:
yy = digit_filter_and_convert(time_list[6])
dd = digit_filter_and_convert(time_list[2])
HH = digit_filter_and_convert(time_list[3])
MM = digit_filter_and_convert(time_list[4])
SS = digit_filter_and_convert(time_list[5])
post_datetime = datetime(year = yy,
month = m+1,
day = dd,
hour = HH,
minute = MM,
second = SS)
except:
pass
#print(time_str)
print(post_datetime)
break
return post_datetime
###Output
_____no_output_____
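###Markdown
A usage sketch for the parser above, with a made-up article body laid out the way PTT renders it (the header fields run together, so '時間' is followed directly by the timestamp). This assumes the cell above has been executed:
###Code
sample = '作者someone\n標題[賣/] Surface Pro 3\n時間Thu Nov 14 01:49:20 2019\n\n[交易價格]:6800\n'
print(get_date_from_content(sample))  # expected: 2019-11-14 01:49:20
###Output
_____no_output_____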
###Markdown
6. Define a function - settings used when fetching a web page
###Code
def get_web_page(url):
resp = requests.get(
url=url,
cookies={'over18': '1'}
)
if resp.status_code != 200:
print('Invalid url:', resp.url)
return None
else:
return resp.text
###Output
_____no_output_____
###Markdown
7. Define a function - get the transaction price and post date from the article content
###Code
def get_content(dom):
soup = BeautifulSoup(dom, 'html.parser')
divs = soup.find_all('div', id = 'main-container')
price = "none"
for d in divs:
content=re.split(':| |\n',d.text)
post_date = get_date_from_content(d.text)
#print(d.text)
#print(content)
for index in range(len(content)):
if word_in_string(content[index], '交易價格'):
pre_price = content[index+1]
price_list = filter(str.isdigit, pre_price)
price = ''.join(list(price_list))
return price, post_date
###Output
_____no_output_____
###Markdown
8. Define a function - get the article information for each link on the page
###Code
def get_articles(dom):
try :
soup = BeautifulSoup(dom, 'html.parser')
        paging_div = soup.find('div', 'btn-group btn-group-paging')  # get the link to the previous page
prev_url = paging_div.find_all('a')[1]['href']
        articles = []  # store the retrieved article data
divs = soup.find_all('div', 'r-ent')
for d in divs:
            if d.find('a'):  # a hyperlink means the article exists and has not been deleted
#D_day = d.find('div', 'date').text.strip()
href = d.find('a')['href']
price, post_date = get_content(get_web_page(PTT_URL + href))
title = d.find('a').text
author = d.find('div', 'author').text if d.find('div', 'author') else ''
titles = title.split("]")
#print(titles)
if len(titles)==2:
data_content = {
'category':titles[0][1:3],
'title': titles[1][1:],
'href': href,
'author': author,
'price' : price,
'date' : post_date
}
articles.append(data_content)
print(data_content)
with open(csv_filename,'a',newline='') as csvfile:
writer = csv.writer(csvfile)
            writer = csv.DictWriter(csvfile, fieldnames=header)  # 'header' is the column list defined when the CSV was created
for article in articles:
#print(article)
writer.writerow(article)
return articles, prev_url
    except:
        # no previous-page link or a parsing error: return an empty result so the crawl stops
        return [], None
###Output
_____no_output_____
###Markdown
9. Main program - read every article shown in the search results (this takes a while, please be patient)
###Code
if __name__ == '__main__':
articles,detail = [],[]
new_articles = 1
while new_articles:
if new_articles!=1 :
articles += new_articles
try:
current_page = get_web_page(PTT_URL + target_url)
except:
current_page = -1
if current_page:
new_articles, target_url = get_articles(current_page)
###Output
2019-11-14 01:49:20
{'category': '賣/', 'title': 'Surface Pro 3 台南高雄', 'href': '/bbs/nb-shopping/M.1573667362.A.5C0.html', 'author': 'poohpandas', 'price': '6800', 'date': datetime.datetime(2019, 11, 14, 1, 49, 20)}
2019-11-08 00:25:57
2019-11-06 19:56:35
{'category': '賣/', 'title': 'surface pro 3 附鍵盤、手寫筆', 'href': '/bbs/nb-shopping/M.1573041397.A.369.html', 'author': 'Yuan', 'price': '', 'date': datetime.datetime(2019, 11, 6, 19, 56, 35)}
2019-11-06 00:12:53
{'category': '賣/', 'title': 'Surface Pro 3 附鍵盤', 'href': '/bbs/nb-shopping/M.1572970375.A.AF7.html', 'author': 'poohpandas', 'price': '7000', 'date': datetime.datetime(2019, 11, 6, 0, 12, 53)}
2019-11-04 03:00:27
{'category': '賣/', 'title': '降價,Surface pro 3 8gb/ 256g', 'href': '/bbs/nb-shopping/M.1572807629.A.ED1.html', 'author': 'pandadna', 'price': '1000019000', 'date': datetime.datetime(2019, 11, 4, 3, 0, 27)}
2019-11-01 23:04:00
{'category': '賣/', 'title': 'Surface Pro 3', 'href': '/bbs/nb-shopping/M.1572620645.A.5E0.html', 'author': 'poohpandas', 'price': '7000', 'date': datetime.datetime(2019, 11, 1, 23, 4)}
2019-10-31 16:48:42
{'category': '賣/', 'title': 'Surface Pro 3 4G/128G 售出', 'href': '/bbs/nb-shopping/M.1572511726.A.595.html', 'author': 'kaiba541', 'price': '10000', 'date': datetime.datetime(2019, 10, 31, 16, 48, 42)}
2019-10-28 17:54:30
{'category': '賣/', 'title': ' Surface Pro 3 i5/8G/256G 有鍵盤', 'href': '/bbs/nb-shopping/M.1572256472.A.CF8.html', 'author': 'baritone7563', 'price': '14000', 'date': datetime.datetime(2019, 10, 28, 17, 54, 30)}
2019-10-16 03:24:10
{'category': '賣/', 'title': '(售出) Surface Pro 3 i5/8G/256G', 'href': '/bbs/nb-shopping/M.1571167452.A.B11.html', 'author': 'sexydiamond', 'price': '16500', 'date': datetime.datetime(2019, 10, 16, 3, 24, 10)}
2019-10-12 13:38:20
{'category': '賣/', 'title': 'urface Pro 3 i5 4G/128G. 已售出, 謝謝!', 'href': '/bbs/nb-shopping/M.1570858702.A.8CB.html', 'author': 'iammie', 'price': '11500', 'date': datetime.datetime(2019, 10, 12, 13, 38, 20)}
2019-09-18 22:56:28
{'category': '賣/', 'title': 'surface pro 3[已售]', 'href': '/bbs/nb-shopping/M.1568818590.A.32D.html', 'author': 'Ohmygod0908', 'price': '7000', 'date': datetime.datetime(2019, 9, 18, 22, 56, 28)}
2019-09-17 23:06:52
{'category': '賣/', 'title': 'Surface Pro 3 i5/8g/256G(雙鍵盤)售出', 'href': '/bbs/nb-shopping/M.1568732814.A.60D.html', 'author': 'JBROTHER', 'price': '1000', 'date': datetime.datetime(2019, 9, 17, 23, 6, 52)}
2019-09-13 15:37:42
{'category': '徵/', 'title': 'Surface Pro 3', 'href': '/bbs/nb-shopping/M.1568360284.A.292.html', 'author': 'Kans9527', 'price': '75009500', 'date': datetime.datetime(2019, 9, 13, 15, 37, 42)}
2019-09-12 23:06:41
{'category': '賣/', 'title': 'Surface Pro 3 i5/8g/256G(雙鍵盤) ', 'href': '/bbs/nb-shopping/M.1568300803.A.B97.html', 'author': 'JBROTHER', 'price': '1000', 'date': datetime.datetime(2019, 9, 12, 23, 6, 41)}
2019-08-31 10:53:11
{'category': '賣/', 'title': 'Surface Pro 3 i5/4g/128g', 'href': '/bbs/nb-shopping/M.1567219995.A.E1C.html', 'author': 'halaluya', 'price': '4300', 'date': datetime.datetime(2019, 8, 31, 10, 53, 11)}
2019-08-29 21:46:49
{'category': '賣/', 'title': 'surface pro 3 i5 4g 128g', 'href': '/bbs/nb-shopping/M.1567086411.A.60C.html', 'author': 'fongandy', 'price': '70006000', 'date': datetime.datetime(2019, 8, 29, 21, 46, 49)}
2019-08-27 00:53:45
{'category': '賣/', 'title': 'Surface Pro 3 i5/8g/256G(雙鍵盤) ', 'href': '/bbs/nb-shopping/M.1566838427.A.FB8.html', 'author': 'JBROTHER', 'price': '', 'date': datetime.datetime(2019, 8, 27, 0, 53, 45)}
2019-08-19 23:57:01
{'category': '賣/', 'title': 'Surface Pro 3 i5/8g/256G(雙鍵盤)', 'href': '/bbs/nb-shopping/M.1566230223.A.05D.html', 'author': 'JBROTHER', 'price': '', 'date': datetime.datetime(2019, 8, 19, 23, 57, 1)}
2019-08-15 02:29:12
{'category': '賣/', 'title': ' surface pro 3 i5 4g 128g', 'href': '/bbs/nb-shopping/M.1565807354.A.4FA.html', 'author': 'ttyuu', 'price': '9500', 'date': datetime.datetime(2019, 8, 15, 2, 29, 12)}
2019-08-12 17:56:44
{'category': '賣/', 'title': 'Surface Pro 3 i5/8g/256G(雙鍵盤)', 'href': '/bbs/nb-shopping/M.1565603807.A.87F.html', 'author': 'JBROTHER', 'price': '', 'date': datetime.datetime(2019, 8, 12, 17, 56, 44)}
2019-07-31 19:25:23
{'category': '賣/', 'title': 'Surface Pro 3/Surface 3 128G 鍵盤', 'href': '/bbs/nb-shopping/M.1564572325.A.EEE.html', 'author': 'ragwing', 'price': '7000', 'date': datetime.datetime(2019, 7, 31, 19, 25, 23)}
2019-07-08 21:51:57
{'category': '賣/', 'title': 'surface pro 3 i5/128g/4g', 'href': '/bbs/nb-shopping/M.1562593919.A.0F6.html', 'author': 'qqxu5', 'price': '9500', 'date': datetime.datetime(2019, 7, 8, 21, 51, 57)}
2019-07-02 00:19:11
{'category': '賣/', 'title': 'surface pro 3 i5/4g/128G', 'href': '/bbs/nb-shopping/M.1561997953.A.EF9.html', 'author': 'qqxu5', 'price': '10000', 'date': datetime.datetime(2019, 7, 2, 0, 19, 11)}
2019-06-25 14:13:49
{'category': '徵/', 'title': '微Surface Pro 3/4 希望配件齊全 ', 'href': '/bbs/nb-shopping/M.1561443232.A.55D.html', 'author': 'panio', 'price': '800011000', 'date': datetime.datetime(2019, 6, 25, 14, 13, 49)}
2019-06-24 15:16:50
{'category': '賣/', 'title': '微軟 Surface pro 3 4G/128G', 'href': '/bbs/nb-shopping/M.1561360612.A.350.html', 'author': 'yikai1203', 'price': '10000', 'date': datetime.datetime(2019, 6, 24, 15, 16, 50)}
2019-06-15 23:30:03
{'category': '討論', 'title': '詢價 二手surface pro 3', 'href': '/bbs/nb-shopping/M.1560612605.A.37A.html', 'author': 'Ohmygod0908', 'price': 'none', 'date': datetime.datetime(2019, 6, 15, 23, 30, 3)}
2019-05-13 22:19:30
{'category': '賣/', 'title': 'Surface Pro 3 i5 4G/128G ', 'href': '/bbs/nb-shopping/M.1557757176.A.427.html', 'author': 'tdst', 'price': '7000', 'date': datetime.datetime(2019, 5, 13, 22, 19, 30)}
2019-05-11 11:27:23
{'category': '賣/', 'title': '售出surface pro 3 i5/8G/256G 售出', 'href': '/bbs/nb-shopping/M.1557545245.A.B6B.html', 'author': 'OneSeven', 'price': '11000', 'date': datetime.datetime(2019, 5, 11, 11, 27, 23)}
2019-05-05 23:21:14
{'category': '賣/', 'title': 'urface pro 3 i5/8g/256g ', 'href': '/bbs/nb-shopping/M.1557069676.A.FD3.html', 'author': 'richao', 'price': '', 'date': datetime.datetime(2019, 5, 5, 23, 21, 14)}
2019-03-31 06:28:58
{'category': '賣/', 'title': 'surface pro 3i3-4020Y 4G/128G', 'href': '/bbs/nb-shopping/M.1553984942.A.01B.html', 'author': 'nung0410', 'price': '10000', 'date': datetime.datetime(2019, 3, 31, 6, 28, 58)}
2019-03-23 18:27:31
{'category': '賣/', 'title': 'Surface Pro 3 i5/4GB 128GB(已售出)', 'href': '/bbs/nb-shopping/M.1553336853.A.F03.html', 'author': 'key681205', 'price': '9500', 'date': datetime.datetime(2019, 3, 23, 18, 27, 31)}
2019-03-19 22:50:22
{'category': '賣/', 'title': 'Surface Pro 3 I5 8G/256G', 'href': '/bbs/nb-shopping/M.1553007024.A.89A.html', 'author': 'iam1417', 'price': '12800', 'date': datetime.datetime(2019, 3, 19, 22, 50, 22)}
2019-03-08 21:39:11
{'category': '賣/', 'title': 'urface pro 3 8g 256g (已售出)', 'href': '/bbs/nb-shopping/M.1552052354.A.AC5.html', 'author': 'pcpx1536', 'price': '8500', 'date': datetime.datetime(2019, 3, 8, 21, 39, 11)}
2019-03-03 21:48:56
{'category': '賣/', 'title': 'Surface Pro 3 I5 8G/256G', 'href': '/bbs/nb-shopping/M.1551620938.A.B79.html', 'author': 'iam1417', 'price': '12800', 'date': datetime.datetime(2019, 3, 3, 21, 48, 56)}
2019-02-27 19:27:03
{'category': '賣/', 'title': 'urface Pro 3 I5 8G/256G (已售出', 'href': '/bbs/nb-shopping/M.1551266829.A.9CE.html', 'author': 'fgh123d', 'price': '9000', 'date': datetime.datetime(2019, 2, 27, 19, 27, 3)}
2019-02-25 21:27:40
{'category': '賣/', 'title': '賣出 surface pro 3 i5 4G 128G', 'href': '/bbs/nb-shopping/M.1551101263.A.578.html', 'author': 'menchi', 'price': '8000', 'date': datetime.datetime(2019, 2, 25, 21, 27, 40)}
2019-02-24 15:21:57
{'category': '賣/', 'title': 'Surface Pro 3 i5/8G/256G/鍵盤(售出)', 'href': '/bbs/nb-shopping/M.1550992920.A.4F5.html', 'author': 'kimwang', 'price': '10000', 'date': datetime.datetime(2019, 2, 24, 15, 21, 57)}
2019-02-09 10:10:45
{'category': '賣/', 'title': 'urface Pro 3 i7/512G/鍵盤/基座/筆 ', 'href': '/bbs/nb-shopping/M.1549678249.A.EF6.html', 'author': 'a8747436', 'price': '19000', 'date': datetime.datetime(2019, 2, 9, 10, 10, 45)}
2019-02-04 14:16:53
{'category': '賣/', 'title': '(已售出)Surface Pro 3 i7/512G/鍵盤/觸控筆', 'href': '/bbs/nb-shopping/M.1549261016.A.7A8.html', 'author': 'Borgia', 'price': '19000', 'date': datetime.datetime(2019, 2, 4, 14, 16, 53)}
###Markdown
10. Plot the chart and fitted curve - days since release vs. second-hand price
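np.polyfit with degree 1 fits an ordinary least-squares line, price ≈ slope * days + intercept, and np.poly1d wraps the coefficients so the line can be evaluated directly. A tiny sketch on made-up points before running it on the scraped data:
###Code
import numpy as np

days = [100, 500, 1000]
prices = [20000, 15000, 10000]
slope, intercept = np.polyfit(days, prices, 1)  # least-squares line through the points
line = np.poly1d([slope, intercept])
print(round(intercept), round(slope, 2))  # starting price and NTD lost per day (illustrative numbers)
print(line(700))                          # predicted second-hand price 700 days after release
###Output
_____no_output_____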
###Code
release_datetime = datetime(year = 2014, month = 6, day = 20, hour = 10, minute = 0, second = 0)
x, y = [], []
with open('ptt_surface3.csv', 'r') as csvfile:
r= csv.reader(csvfile, delimiter=',')
for i,row in enumerate(r):
print(row[6], row[5])
if i > 0 and row[6] != '-1' and row[5].isdigit() :
time_list = re.split(':| |-',row[6])
if 2000 < int(time_list[0]) and 2020 > int(time_list[0]):
post_datetime = datetime(year = int(time_list[0]),
month = int(time_list[1]),
day = int(time_list[2]),
hour = int(time_list[3]),
minute = int(time_list[4]),
second = int(time_list[5]))
print(post_datetime)
if int(row[5])> 5000 and int(row[5]) < 50000:
delta = post_datetime-release_datetime
x.append(delta.days)
y.append(int(row[5]))
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x,y,'b.',x,p(x),'r-')
#plt.plot(x,Y,'r-')
plt.title('Surface prices in timeline')
plt.xlabel('Days after release')
plt.ylabel('Price (NTD)')
#plt.legend(loc='upper right')
plt.xticks(np.arange(0, 2800, 400))
plt.yticks(np.arange(0, 60000, 10000))
plt.show()
print('The formula of second hand price of surface pro 3 : %s + %s * days'% (round(z[1]) ,round(z[0],2)))
###Output
date price
2019-11-14 01:49:20 6800
2019-11-14 01:49:20
2019-11-06 19:56:35
2019-11-06 00:12:53 7000
2019-11-06 00:12:53
2019-11-04 03:00:27 1000019000
2019-11-04 03:00:27
2019-11-01 23:04:00 7000
2019-11-01 23:04:00
2019-10-31 16:48:42 10000
2019-10-31 16:48:42
2019-10-28 17:54:30 14000
2019-10-28 17:54:30
2019-10-16 03:24:10 16500
2019-10-16 03:24:10
2019-10-12 13:38:20 11500
2019-10-12 13:38:20
2019-09-18 22:56:28 7000
2019-09-18 22:56:28
2019-09-17 23:06:52 1000
2019-09-17 23:06:52
2019-09-13 15:37:42 75009500
2019-09-13 15:37:42
2019-09-12 23:06:41 1000
2019-09-12 23:06:41
2019-08-31 10:53:11 4300
2019-08-31 10:53:11
2019-08-29 21:46:49 70006000
2019-08-29 21:46:49
2019-08-27 00:53:45
2019-08-19 23:57:01
2019-08-15 02:29:12 9500
2019-08-15 02:29:12
2019-08-12 17:56:44
2019-07-31 19:25:23 7000
2019-07-31 19:25:23
2019-07-08 21:51:57 9500
2019-07-08 21:51:57
2019-07-02 00:19:11 10000
2019-07-02 00:19:11
2019-06-25 14:13:49 800011000
2019-06-25 14:13:49
2019-06-24 15:16:50 10000
2019-06-24 15:16:50
2019-06-15 23:30:03 none
2019-05-13 22:19:30 7000
2019-05-13 22:19:30
2019-05-11 11:27:23 11000
2019-05-11 11:27:23
2019-05-05 23:21:14
2019-03-31 06:28:58 10000
2019-03-31 06:28:58
2019-03-23 18:27:31 9500
2019-03-23 18:27:31
2019-03-19 22:50:22 12800
2019-03-19 22:50:22
2019-03-08 21:39:11 8500
2019-03-08 21:39:11
2019-03-03 21:48:56 12800
2019-03-03 21:48:56
2019-02-27 19:27:03 9000
2019-02-27 19:27:03
2019-02-25 21:27:40 8000
2019-02-25 21:27:40
2019-02-24 15:21:57 10000
2019-02-24 15:21:57
2019-02-09 10:10:45 19000
2019-02-09 10:10:45
2019-02-04 14:16:53 19000
2019-02-04 14:16:53
2019-01-09 14:01:48 8000
2019-01-09 14:01:48
2018-12-19 10:58:04 15000
2018-12-19 10:58:04
2018-12-18 19:36:45 9000
2018-12-18 19:36:45
2018-11-21 15:31:34 18000
2018-11-21 15:31:34
2018-11-10 20:10:06 16500
2018-11-10 20:10:06
2018-11-07 12:22:01 17500
2018-11-07 12:22:01
2018-10-28 23:31:49 18500
2018-10-28 23:31:49
2018-10-07 12:53:30 8000
2018-10-07 12:53:30
2018-09-03 21:40:29 4000
2018-09-03 21:40:29
2018-09-01 11:58:07 8000
2018-09-01 11:58:07
2018-07-15 21:38:22 10000
2018-07-15 21:38:22
2018-07-11 15:49:04 10000
2018-07-11 15:49:04
2018-07-04 04:22:05 13500
2018-07-04 04:22:05
2018-07-02 15:36:14 4
2018-07-02 15:36:14
2018-06-28 09:51:02 10500
2018-06-28 09:51:02
2018-06-09 23:43:20 9500
2018-06-09 23:43:20
2018-05-16 07:38:54 14000
2018-05-16 07:38:54
2018-04-25 22:22:46 15000
2018-04-25 22:22:46
2018-04-18 13:59:07 12000
2018-04-18 13:59:07
2018-04-17 20:14:23 15000
2018-04-17 20:14:23
2018-04-12 11:57:09 15000
2018-04-12 11:57:09
2018-03-17 16:45:04 20
2018-03-17 16:45:04
2018-03-05 22:49:26 11000
2018-03-05 22:49:26
2018-03-01 21:47:50 10000
2018-03-01 21:47:50
2018-02-25 14:18:52 179005
2018-02-25 14:18:52
2018-02-13 10:55:25 3
2018-02-13 10:55:25
2018-02-11 09:56:22 10000
2018-02-11 09:56:22
2018-02-04 02:59:14 6000
2018-02-04 02:59:14
2018-01-30 23:29:38
2018-01-27 11:21:40 12000
2018-01-27 11:21:40
2018-01-23 16:53:19 20000
2018-01-23 16:53:19
2018-01-16 18:21:55 2500
2018-01-16 18:21:55
2018-01-15 01:07:09 13000
2018-01-15 01:07:09
2018-01-05 12:12:21 2800
2018-01-05 12:12:21
2018-01-04 14:15:14
2018-01-03 17:11:07 12000
2018-01-03 17:11:07
2017-12-30 09:19:56 12000
2017-12-30 09:19:56
2017-12-28 10:31:47 15000
2017-12-28 10:31:47
2017-12-27 10:45:27 12000
2017-12-27 10:45:27
2017-12-25 14:37:44 3000
2017-12-25 14:37:44
2017-12-17 15:50:23 3000
2017-12-17 15:50:23
2017-12-17 11:28:59
2017-12-15 15:02:54 17500
2017-12-15 15:02:54
2017-12-14 07:57:38 130000
2017-12-14 07:57:38
2017-12-12 17:27:56 11500
2017-12-12 17:27:56
2017-12-10 01:19:22 6000
2017-12-10 01:19:22
2017-12-09 07:33:51 3500
2017-12-09 07:33:51
2017-12-08 21:47:32 15000
2017-12-08 21:47:32
2017-12-06 05:26:42 2000
2017-12-06 05:26:42
2017-12-03 23:25:19 13000
2017-12-03 23:25:19
2017-12-02 22:27:05 9800
2017-12-02 22:27:05
2017-12-02 21:38:40 5
2017-12-02 21:38:40
2017-11-29 01:17:03 1000
2017-11-29 01:17:03
2017-11-27 13:49:34 12000
2017-11-27 13:49:34
2017-11-26 16:58:10 15000
2017-11-26 16:58:10
2017-11-23 15:16:25 10500
2017-11-23 15:16:25
2017-11-22 21:31:58 23000
2017-11-22 21:31:58
2017-11-20 22:30:52 23000
2017-11-20 22:30:52
2017-11-20 17:24:26 10000
2017-11-20 17:24:26
2017-11-19 19:49:29 10000
2017-11-19 19:49:29
2017-11-18 10:01:30 25000
2017-11-18 10:01:30
2017-11-18 09:40:38 10000
2017-11-18 09:40:38
2017-11-17 22:48:50
2017-11-11 10:15:47 25000
2017-11-11 10:15:47
2017-11-10 04:17:31 19000
2017-11-10 04:17:31
2017-11-09 10:10:53 25000
2017-11-09 10:10:53
2017-11-07 17:06:26 19000
2017-11-07 17:06:26
2017-11-07 10:16:23 25000
2017-11-07 10:16:23
2017-11-05 10:44:14 27500
2017-11-05 10:44:14
2017-11-03 20:50:16 none
2017-11-03 12:03:22 27500
2017-11-03 12:03:22
2017-10-30 16:37:04 27500
2017-10-30 16:37:04
2017-10-30 02:35:13 15000
2017-10-30 02:35:13
2017-10-29 19:33:05 13500
2017-10-29 19:33:05
2017-10-29 11:19:49 12000
2017-10-29 11:19:49
2017-10-24 12:03:56 none
2017-10-23 21:13:02 10000
2017-10-23 21:13:02
2017-10-23 13:26:07 none
2017-10-22 15:41:47 21500
2017-10-22 15:41:47
2017-10-08 09:06:15 24500
2017-10-08 09:06:15
2017-10-01 20:23:42 25500
2017-10-01 20:23:42
2017-09-29 23:30:26 25500
2017-09-29 23:30:26
2017-09-27 10:14:24 27000
2017-09-27 10:14:24
2017-09-26 10:00:17 14000
2017-09-26 10:00:17
2017-09-25 21:23:03 20000
2017-09-25 21:23:03
2017-09-25 20:32:29 29000
2017-09-25 20:32:29
2017-09-23 23:48:18 31000
2017-09-23 23:48:18
2017-09-17 13:09:27 20000
2017-09-17 13:09:27
2017-09-16 11:35:27 18000
2017-09-16 11:35:27
2017-09-05 13:05:03 20500
2017-09-05 13:05:03
2017-09-04 04:07:45 16900
2017-09-04 04:07:45
2017-08-22 02:04:44 1000
2017-08-22 02:04:44
2017-08-20 17:28:38 14000
2017-08-20 17:28:38
2017-08-19 15:55:32
2017-08-16 22:43:32 14000
2017-08-16 22:43:32
2017-08-11 13:37:14 1300
2017-08-11 13:37:14
2017-08-09 22:53:04 12
2017-08-09 22:53:04
2017-08-09 18:14:47 none
2017-08-06 12:36:06 1500
2017-08-06 12:36:06
2017-08-03 19:24:11 1500
2017-08-03 19:24:11
2017-07-17 23:06:11 12000
2017-07-17 23:06:11
2017-07-17 20:42:12 none
2017-07-13 14:20:20 13000
2017-07-13 14:20:20
2017-07-12 08:01:51 8000
2017-07-12 08:01:51
2017-07-10 19:53:17 8000
2017-07-10 19:53:17
2017-07-08 14:50:54 12
2017-07-08 14:50:54
2017-07-07 21:56:28 1500016000
2017-07-07 21:56:28
2017-07-05 10:57:31 25000
2017-07-05 10:57:31
2017-07-01 11:45:32 4000
2017-07-01 11:45:32
2017-06-27 15:41:11 10000
2017-06-27 15:41:11
2017-06-20 23:22:15 19000
2017-06-20 23:22:15
2017-06-12 01:01:28 14000
2017-06-12 01:01:28
2017-06-03 09:55:07 5000
2017-06-03 09:55:07
2017-05-25 09:28:08 11000
2017-05-25 09:28:08
2017-05-23 09:50:22 13000
2017-05-23 09:50:22
2017-05-17 00:29:24 18000
2017-05-17 00:29:24
2017-04-24 00:21:26 1000
2017-04-24 00:21:26
2017-04-19 13:17:50 13200
2017-04-19 13:17:50
2017-04-19 12:30:25 20000
2017-04-19 12:30:25
2017-04-10 17:20:34 16000
2017-04-10 17:20:34
0201-04-09 15:01:59 8000
2017-03-28 21:24:06 1000
2017-03-28 21:24:06
2017-03-28 14:38:39 12500
2017-03-28 14:38:39
2017-03-26 18:39:21 13000
2017-03-26 18:39:21
2017-03-24 03:06:43 15000
2017-03-24 03:06:43
2017-03-22 02:29:59 15000
2017-03-22 02:29:59
2017-03-15 00:33:37 15000
2017-03-15 00:33:37
2017-03-11 09:50:08 12900
2017-03-11 09:50:08
2017-03-10 21:30:07 16000
2017-03-10 21:30:07
2017-03-10 09:03:21 13200
2017-03-10 09:03:21
2017-03-09 22:36:15 16000
2017-03-09 22:36:15
2017-03-08 22:24:46 21500
2017-03-08 22:24:46
2017-03-08 10:10:44 14000
2017-03-08 10:10:44
2017-03-08 06:41:53 16000
2017-03-08 06:41:53
2017-03-07 19:00:45 13000
2017-03-07 19:00:45
2017-03-07 08:19:00 16000
2017-03-07 08:19:00
2017-03-06 20:33:56 14000
2017-03-06 20:33:56
2017-03-05 20:36:41 16000
2017-03-05 20:36:41
2017-03-04 17:33:19 16000
2017-03-04 17:33:19
2017-03-04 07:34:54 18000
2017-03-04 07:34:54
2017-03-03 08:17:45 18000
2017-03-03 08:17:45
0201-03-03 00:08:27 12000
2017-03-02 09:04:11 18000
2017-03-02 09:04:11
2017-03-01 08:29:56 18000
2017-03-01 08:29:56
2017-02-26 00:08:54 20000
2017-02-26 00:08:54
2017-02-24 09:45:33 20000
2017-02-24 09:45:33
2017-02-18 11:06:38 30005000
2017-02-18 11:06:38
0201-02-15 20:00:01 15500
2017-02-15 07:08:58 16000
2017-02-15 07:08:58
2017-02-14 20:53:24 27000
2017-02-14 20:53:24
2017-02-14 01:42:51 16000
2017-02-14 01:42:51
2017-02-13 13:26:39 15000
2017-02-13 13:26:39
2017-02-12 23:19:13 26000
2017-02-12 23:19:13
2017-02-12 21:22:56 1500016000
2017-02-12 21:22:56
2017-02-11 23:07:20 2100023000
2017-02-11 23:07:20
2017-02-11 11:02:36 27500
2017-02-11 11:02:36
2017-02-08 22:17:58 13500
2017-02-08 22:17:58
2017-02-08 14:29:11 25000
2017-02-08 14:29:11
2017-02-07 18:59:45 20000
2017-02-07 18:59:45
2017-02-07 16:46:14 20000
2017-02-07 16:46:14
2017-02-06 19:09:46 2100023000
2017-02-06 19:09:46
2017-02-06 00:06:36 20000
2017-02-06 00:06:36
2017-02-05 22:26:47
2017-02-05 01:36:59 1200015000
2017-02-05 01:36:59
2017-02-03 22:49:59 none
2017-02-03 11:23:01 2000022000
2017-02-03 11:23:01
2017-02-02 07:25:28 28000
2017-02-02 07:25:28
2017-02-01 17:23:39 2000022000
2017-02-01 17:23:39
2017-01-30 21:50:48 29000
2017-01-30 21:50:48
2017-01-28 21:24:16 29000
2017-01-28 21:24:16
2017-01-27 02:04:16 15000
2017-01-27 02:04:16
2017-01-26 14:07:59 15000
2017-01-26 14:07:59
2017-01-23 22:23:03 26000
2017-01-23 22:23:03
2017-01-19 22:41:45 17999
2017-01-19 22:41:45
2017-01-07 14:32:55 22000
2017-01-07 14:32:55
2017-01-02 09:50:53
2016-12-25 21:33:27 19000
2016-12-25 21:33:27
2016-12-12 12:57:20 23000
2016-12-12 12:57:20
-1 none
2016-12-09 14:49:30
2016-12-09 08:57:29 23000
2016-12-09 08:57:29
2016-12-09 00:26:44 15000
2016-12-09 00:26:44
2016-12-07 00:13:04 15000
2016-12-07 00:13:04
2016-12-05 02:43:49 16000
2016-12-05 02:43:49
2016-11-29 20:48:46 23000
2016-11-29 20:48:46
2016-11-26 02:10:31 none
2016-11-18 22:59:17 14000
2016-11-18 22:59:17
2016-11-18 22:41:03 23000
2016-11-18 22:41:03
2016-11-15 17:43:23 14000
2016-11-15 17:43:23
2016-11-15 09:43:06
2016-11-12 10:40:42 25000
2016-11-12 10:40:42
2016-11-10 00:43:08
2016-11-05 12:37:07 6500
2016-11-05 12:37:07
2016-10-30 14:52:44 20000
2016-10-30 14:52:44
2016-10-29 10:24:07 20000
2016-10-29 10:24:07
0201-10-28 20:19:37 18000
2016-10-28 10:51:32 17000
2016-10-28 10:51:32
2016-10-27 03:23:50 900
2016-10-27 03:23:50
2016-10-26 20:09:07 20000
2016-10-26 20:09:07
2016-10-25 07:41:28 20000
2016-10-25 07:41:28
2016-10-24 00:17:03 25000
2016-10-24 00:17:03
2016-10-23 18:44:31 20000
2016-10-23 18:44:31
2016-10-22 21:06:38 20000
2016-10-22 21:06:38
2016-10-21 22:14:11 20000
2016-10-21 22:14:11
2016-10-18 22:50:55
2016-10-18 20:19:33 22000
2016-10-18 20:19:33
2016-10-15 17:10:36 17000
2016-10-15 17:10:36
2016-10-11 22:39:40 26000
2016-10-11 22:39:40
2016-10-08 21:28:26 15000
2016-10-08 21:28:26
2016-10-02 10:16:37 17000
2016-10-02 10:16:37
2016-09-27 20:41:58 19500
2016-09-27 20:41:58
2016-09-26 00:59:42
2016-09-25 23:23:57
2016-09-25 22:02:50 20000
2016-09-25 22:02:50
2016-09-25 20:23:07
2016-09-24 19:52:03
2016-09-24 14:43:58 26000
2016-09-24 14:43:58
2016-09-23 17:53:24
2016-09-23 07:11:02 20000
2016-09-23 07:11:02
2016-09-21 14:14:28
2016-09-20 23:49:15
2016-09-20 16:48:26
2016-09-20 09:19:12 20000
2016-09-20 09:19:12
2016-09-09 13:13:31 26000
2016-09-09 13:13:31
2016-08-31 23:26:41 33500
2016-08-31 23:26:41
2016-08-29 13:49:47
2016-08-27 00:06:57 16000
2016-08-27 00:06:57
0201-08-07 20:08:27 23000
0201-08-05 21:42:37 23000
2016-07-29 00:28:16 12000
2016-07-29 00:28:16
2016-07-25 23:52:03 2000023000
2016-07-25 23:52:03
2016-07-19 08:47:10 15000
2016-07-19 08:47:10
2016-07-19 00:02:06 2000023000
2016-07-19 00:02:06
2016-07-06 16:58:52 1850020500
2016-07-06 16:58:52
2016-07-06 03:21:31 21000
2016-07-06 03:21:31
2016-06-30 10:10:48 30000
2016-06-30 10:10:48
2016-06-28 07:03:04 21000
2016-06-28 07:03:04
2016-06-26 20:44:02 22000
2016-06-26 20:44:02
2016-06-26 11:47:08 none
2016-06-25 11:57:12 23000
2016-06-25 11:57:12
2016-06-24 02:23:51 23000
2016-06-24 02:23:51
2016-06-17 10:46:12 none
2016-06-14 09:13:40 25600
2016-06-14 09:13:40
2016-06-13 09:46:12 26500
2016-06-13 09:46:12
2016-06-06 22:28:11 4000
2016-06-06 22:28:11
2016-06-06 22:12:37 31800
2016-06-06 22:12:37
2016-06-06 11:43:41 31500
2016-06-06 11:43:41
2016-06-05 09:29:32 31000
2016-06-05 09:29:32
2016-06-04 17:01:10 none
2016-05-31 14:30:31 none
2016-05-31 00:38:23 none
2016-05-29 01:46:42 16000
2016-05-29 01:46:42
2016-05-27 12:00:14 30000
2016-05-27 12:00:14
2016-05-27 10:50:42 none
2016-05-25 22:29:58 18000
2016-05-25 22:29:58
0201-05-24 22:39:15 18500
2016-05-23 23:53:25 20000
2016-05-23 23:53:25
2016-05-21 16:47:11 none
2016-05-18 21:19:45 none
2016-05-18 12:47:49
2016-05-17 11:25:22 25000
2016-05-17 11:25:22
2016-05-17 10:52:24 10000
2016-05-17 10:52:24
2016-05-16 21:52:09 45000
2016-05-16 21:52:09
2016-05-16 20:17:21 30000
2016-05-16 20:17:21
2016-05-16 10:16:20 none
2016-05-14 13:56:45 30000
2016-05-14 13:56:45
2016-05-09 11:37:20 none
2016-05-07 14:11:20 20000
2016-05-07 14:11:20
2016-05-05 22:10:37
2016-05-03 17:22:27
2016-04-29 03:41:17 25999
2016-04-29 03:41:17
2016-04-20 16:59:33 33000
2016-04-20 16:59:33
2016-04-18 11:26:23 21000
2016-04-18 11:26:23
2016-04-17 10:14:20 none
2016-04-16 13:35:25 none
2016-04-16 10:56:12 21000
2016-04-16 10:56:12
2016-04-13 22:43:08 none
2016-04-12 00:25:43 29500
2016-04-12 00:25:43
2016-04-02 22:12:26 none
2016-04-02 13:03:44 23000
2016-04-02 13:03:44
2016-03-29 10:35:53 22000
2016-03-29 10:35:53
2016-03-28 21:22:05 24500
2016-03-28 21:22:05
2016-03-24 20:19:50 20000
2016-03-24 20:19:50
2016-03-23 11:28:13 17500
2016-03-23 11:28:13
2016-03-22 13:21:49 40000
2016-03-22 13:21:49
2016-03-21 23:05:53 20000
2016-03-21 23:05:53
2016-03-20 23:16:17 25500
2016-03-20 23:16:17
2016-03-19 01:02:29 25500
2016-03-19 01:02:29
2016-03-18 16:23:03
2016-03-15 11:44:17 26000
2016-03-15 11:44:17
2016-03-14 14:47:22 24000
2016-03-14 14:47:22
0201-03-13 00:44:40 26000
2016-03-12 13:19:43 32000
2016-03-12 13:19:43
2016-03-11 00:47:52 26000
2016-03-11 00:47:52
2016-03-09 21:42:14 27000
2016-03-09 21:42:14
2016-03-08 15:01:29 32000
2016-03-08 15:01:29
2016-03-07 00:06:39 26300
2016-03-07 00:06:39
2016-03-06 21:46:45 32000
2016-03-06 21:46:45
2016-03-05 23:08:33 38888
2016-03-05 23:08:33
2016-03-05 11:02:59 20000
2016-03-05 11:02:59
2016-03-02 23:29:50 26300
2016-03-02 23:29:50
2016-02-29 22:48:50 26300
2016-02-29 22:48:50
2016-02-28 03:16:27 26300
2016-02-28 03:16:27
2016-02-25 13:35:57 none
2016-02-25 01:51:25 none
2016-02-25 01:18:07 26500
2016-02-25 01:18:07
2016-02-23 01:18:30 26500
2016-02-23 01:18:30
2016-02-22 10:20:33 28000
2016-02-22 10:20:33
2016-02-21 15:04:39 26500
2016-02-21 15:04:39
2016-02-21 11:16:23 29000
2016-02-21 11:16:23
2016-02-20 17:10:24 29500
2016-02-20 17:10:24
2016-02-20 15:18:47 26800
2016-02-20 15:18:47
2016-02-19 23:22:37 31000
2016-02-19 23:22:37
2016-02-19 10:26:02 26000
2016-02-19 10:26:02
2016-02-18 23:53:50 27000
2016-02-18 23:53:50
2016-02-17 21:11:33 27000
2016-02-17 21:11:33
2016-02-17 00:46:51 27000
2016-02-17 00:46:51
2016-02-16 16:46:23 21000
2016-02-16 16:46:23
2016-02-15 22:32:37 27000
2016-02-15 22:32:37
2016-02-15 17:12:30 22500
2016-02-15 17:12:30
2016-02-15 12:51:18 none
2016-02-14 22:33:44 27000
2016-02-14 22:33:44
2016-02-13 14:33:41 27500
2016-02-13 14:33:41
2016-02-12 21:39:32 27500
2016-02-12 21:39:32
2016-02-08 12:24:45 22000
2016-02-08 12:24:45
2016-02-08 09:59:01 none
2016-02-04 15:37:01
2016-02-03 00:13:48 21000
2016-02-03 00:13:48
2016-02-02 21:34:39 25000
2016-02-02 21:34:39
2016-02-01 22:10:45 22000
2016-02-01 22:10:45
2016-02-01 21:09:36
2016-01-31 21:57:00 24000
2016-01-31 21:57:00
2016-01-31 20:13:31 23000
2016-01-31 20:13:31
2016-01-30 20:57:55 24000
2016-01-30 20:57:55
2016-01-30 10:51:27 23000
2016-01-30 10:51:27
2016-01-30 10:16:03
2016-01-29 18:04:48 1800021000
2016-01-29 18:04:48
0201-01-28 19:10:39
2016-01-27 11:22:08
2016-01-27 02:45:06 28500
2016-01-27 02:45:06
2016-01-26 22:29:34 29500
2016-01-26 22:29:34
2016-01-24 10:17:06 24000
2016-01-24 10:17:06
2016-01-22 20:02:54
2016-01-20 17:26:12 21000
2016-01-20 17:26:12
2016-01-17 13:16:10 17000
2016-01-17 13:16:10
2016-01-16 11:12:06 17888
2016-01-16 11:12:06
2016-01-14 21:38:24 2700
2016-01-14 21:38:24
2016-01-12 14:28:11 750
2016-01-12 14:28:11
2016-01-10 01:29:01
-1 29000
2016-01-09 12:32:57 21000
2016-01-09 12:32:57
2016-01-08 22:33:03 29000
2016-01-08 22:33:03
2016-01-08 00:25:49 28000
2016-01-08 00:25:49
2016-01-06 02:09:21 29000
2016-01-06 02:09:21
2016-01-05 18:22:40 21000
2016-01-05 18:22:40
2016-01-05 10:33:41 28500
2016-01-05 10:33:41
2016-01-03 01:10:09 none
2016-01-01 22:28:55 28500
2016-01-01 22:28:55
2015-12-31 14:14:07 29000
2015-12-31 14:14:07
2015-12-26 10:39:48 none
2015-12-24 21:22:27
2015-12-23 15:58:30 330000437000
2015-12-23 15:58:30
2015-12-21 09:53:14 22000
2015-12-21 09:53:14
2015-12-20 23:17:07 30500
2015-12-20 23:17:07
2015-12-20 01:35:48
2015-12-15 17:44:38
2015-12-11 02:32:16 28000
2015-12-11 02:32:16
2015-12-08 14:48:12 23500
2015-12-08 14:48:12
2015-12-08 01:12:45 30000
2015-12-08 01:12:45
2015-12-07 00:38:32 none
2015-12-02 15:03:59 32000
2015-12-02 15:03:59
2015-12-02 13:03:46 1800
2015-12-02 13:03:46
2015-12-02 09:42:24 18000
2015-12-02 09:42:24
2015-11-30 14:29:19 20000
2015-11-30 14:29:19
2015-11-29 21:50:16 2800030000
2015-11-29 21:50:16
2015-11-27 12:21:44 10005000
2015-11-27 12:21:44
2015-11-26 09:06:28 31000
2015-11-26 09:06:28
2015-11-24 19:23:49 25000
2015-11-24 19:23:49
2015-11-21 15:50:37 19000
2015-11-21 15:50:37
2015-11-18 18:47:23 none
2015-11-17 13:23:11 1500
2015-11-17 13:23:11
2015-11-16 13:09:26
2015-11-12 08:59:25 30000
2015-11-12 08:59:25
2015-11-11 09:47:14 30000
2015-11-11 09:47:14
2015-11-10 19:20:00 none
2015-11-09 21:56:43 2500016000
2015-11-09 21:56:43
2015-11-08 20:31:02 31500
2015-11-08 20:31:02
2015-11-07 23:42:16
2015-11-07 10:36:17 none
2015-11-06 11:29:29 2000
2015-11-06 11:29:29
2015-11-05 02:21:30 none
2015-11-04 10:42:28 3500
2015-11-04 10:42:28
2015-11-04 09:46:10 none
2015-11-02 22:01:49 2500016000
2015-11-02 22:01:49
2015-10-29 01:06:25 36000
2015-10-29 01:06:25
2015-10-29 00:26:41
2015-10-28 23:47:36 26000
2015-10-28 23:47:36
2015-10-27 23:19:15 30000
2015-10-27 23:19:15
2015-10-26 12:34:41 31000
2015-10-26 12:34:41
2015-10-25 13:46:42
2015-10-22 18:16:15 6000
2015-10-22 18:16:15
2015-10-21 20:32:31
2015-10-17 23:01:04 20000
2015-10-17 23:01:04
2015-10-17 17:48:03 none
2015-10-17 01:20:33 25000
2015-10-17 01:20:33
2015-10-10 12:43:23 39000
2015-10-10 12:43:23
2015-10-08 12:26:41 29000
2015-10-08 12:26:41
2015-10-07 01:34:23 25000
2015-10-07 01:34:23
2015-10-01 22:21:58 35000
2015-10-01 22:21:58
2015-09-30 20:00:41 27500
2015-09-30 20:00:41
2015-09-30 02:17:04
2015-09-28 11:38:59 23000
2015-09-28 11:38:59
2015-09-27 10:48:22
2015-09-26 22:50:12 25000
2015-09-26 22:50:12
2015-09-24 20:59:47 4500
2015-09-24 20:59:47
2015-09-22 10:18:52 27000
2015-09-22 10:18:52
2015-09-19 21:43:19 none
2015-09-17 23:22:52 none
2015-09-14 01:05:57 2500032000
2015-09-14 01:05:57
0201-09-13 19:35:13 none
2015-09-13 17:12:05 26500
2015-09-13 17:12:05
2015-09-10 19:39:28 47000
2015-09-10 19:39:28
2015-09-09 08:44:44 30000
2015-09-09 08:44:44
2015-09-06 13:59:56 32000
2015-09-06 13:59:56
2015-09-06 12:40:34 35000
2015-09-06 12:40:34
2015-09-03 22:57:19 1000
2015-09-03 22:57:19
2015-09-03 14:54:47 32000
2015-09-03 14:54:47
2015-09-02 01:11:23 35000
2015-09-02 01:11:23
2015-08-31 22:02:15 33000
2015-08-31 22:02:15
0201-08-29 11:44:02 30000
2015-08-27 20:28:25 none
2015-08-23 15:12:33 31000
2015-08-23 15:12:33
2015-08-19 20:37:14 36000
2015-08-19 20:37:14
2015-08-17 16:48:56 36000
2015-08-17 16:48:56
2015-08-13 13:14:50 28500
2015-08-13 13:14:50
-1 30000
2015-08-03 22:05:19 36000
2015-08-03 22:05:19
2015-07-30 10:26:45 20000
2015-07-30 10:26:45
2015-07-29 00:32:38 2000035000
2015-07-29 00:32:38
2015-07-28 23:06:32 2000035000
2015-07-28 23:06:32
2015-07-27 00:32:22 2000035000
2015-07-27 00:32:22
2015-07-25 05:40:47 2000035000
2015-07-25 05:40:47
2015-07-23 21:49:25 25000
2015-07-23 21:49:25
2015-07-22 17:30:23 37000
2015-07-22 17:30:23
2015-07-21 13:09:36 30000
2015-07-21 13:09:36
2015-07-15 04:26:24 none
2015-07-15 00:06:44 none
2015-07-13 12:08:22 30000
2015-07-13 12:08:22
2015-07-11 21:14:31 25000
2015-07-11 21:14:31
2015-07-11 14:54:02 30000
2015-07-11 14:54:02
2015-07-11 10:38:06 none
2015-07-08 01:19:36 25000
2015-07-08 01:19:36
2015-06-30 14:26:13 18000
2015-06-30 14:26:13
2015-06-24 00:59:56 none
2015-06-23 17:25:11
2015-06-19 17:24:34 24900
2015-06-19 17:24:34
2015-06-19 15:42:19 25000
2015-06-19 15:42:19
2015-06-10 08:59:42 27000
2015-06-10 08:59:42
2015-06-08 16:24:27 27000
2015-06-08 16:24:27
2015-06-06 01:38:59 26500
2015-06-06 01:38:59
2015-05-22 22:50:19 25000
2015-05-22 22:50:19
2015-05-22 14:11:25 50000
2015-05-22 14:11:25
2015-05-21 09:23:47
2015-05-20 23:01:16
2015-05-18 21:40:09
2015-05-17 23:06:34
2015-05-17 20:24:27 none
2015-05-16 15:34:28
2015-05-12 10:48:43 none
2015-05-12 08:55:08 none
2015-05-07 14:27:04 2850026500
2015-05-07 14:27:04
2015-05-03 21:41:41 55000
2015-05-03 21:41:41
2015-05-02 08:33:35 50000
2015-05-02 08:33:35
2015-04-26 07:36:52 25000
2015-04-26 07:36:52
2015-04-24 23:59:23 28000
2015-04-24 23:59:23
|
03.power_consumption/10.energy-forecasting-CNN.ipynb | ###Markdown
Forecasting using CNN - One-step recursive forecasting
###Code
import pandas as pd
import numpy as np
import os
import time
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# preprocessing methods
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
# accuracy measures and data spliting
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# deep learning libraries
from keras.models import Input, Model
from keras.models import Sequential
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras import layers
from keras import losses
from keras import optimizers
from keras import metrics
from keras import callbacks
from keras import initializers
from keras import regularizers
import warnings
warnings.filterwarnings('ignore')
plt.style.use('seaborn')
plt.rcParams['figure.figsize'] = 15, 7
###Output
_____no_output_____
###Markdown
1. Data import
###Code
DATADIR = '../data/power-consumption/'
MODELDIR = '../checkpoints/power/ffnn/model/'
data = pd.read_csv(os.path.join(DATADIR, 'processed_data.csv'))
data.head()
###Output
_____no_output_____
###Markdown
2. Train test split
###Code
y = data[['Global_active_power']].copy()
X = data.drop(columns=['date', 'Global_active_power', 'Median_active_power', 'Lagged_active_power', 'median_residual'], axis=1)
# hold out the last 7 days of data for testing
test_size = np.int16(7*1)
train_size = X.shape[0] - test_size
# scaling weekly sales, dependent variable
y_scaler = StandardScaler()
y_scaled = y_scaler.fit_transform(y.values)
# scaling numerical column from features
x_scaler = StandardScaler()
X_scaled = x_scaler.fit_transform(X.values)
X_train, X_test = X_scaled[:train_size-1, :], X_scaled[train_size:, :]
y_train, y_test = y_scaled[:train_size-1, :], y_scaled[train_size:, :]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
4. Model Building
###Code
timesteps = 1
features = X_train.shape[1]
X_train = np.reshape(X_train, (X_train.shape[0], timesteps, features))
X_test = np.reshape(X_test, (X_test.shape[0], timesteps, features))
X_train.shape, X_test.shape, y_train.shape, y_test.shape
def model_construction():
# xavier initializer
xavier = initializers.glorot_normal()
model = Sequential()
model.add(Conv1D(8, kernel_size=3, activation='relu', padding='same', strides=1, kernel_initializer=xavier,
input_shape=(timesteps, features)))
model.add(Conv1D(16, kernel_size=3, activation='relu', padding='same', strides=1, kernel_initializer=xavier))
model.add(Conv1D(16, kernel_size=3, activation='relu', padding='same', strides=1, kernel_initializer=xavier))
# model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(16, activation='relu', kernel_initializer=xavier))
model.add(Dense(1, kernel_initializer=xavier))
model.summary()
return model
def traning(model, X_train, y_train, MODELDIR):
start = time.time()
model.compile(loss=losses.mean_squared_error, optimizer=optimizers.Adam(), metrics=[metrics.mean_absolute_error])
callbacks_list = [callbacks.ReduceLROnPlateau(monitor='loss', factor=0.2,
patience=5, min_lr=0.001)]
history = model.fit(X_train, y_train,
epochs=50,
batch_size=32,
verbose=0,
shuffle=False,
callbacks=callbacks_list
)
y_train_pred = model.predict(X_train)
# recursive model fitting
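# One-step recursive forecasting: at each test step the model is refit on all data
# seen so far, a single next-step prediction is made, and that prediction is then
# appended to the training set before moving on to the next step.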
onestep_pred = []
for i in range(X_test.shape[0]):
model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0, shuffle=False)
pred = model.predict(X_test[i, :, :].reshape(1, 1, features))
onestep_pred.append(pred)
tempX = np.vstack((X_train, X_test[i, :, :].reshape(1, 1, features)))
X_train = tempX.copy()
arr_pred = np.array([pred]).reshape(-1, 1)
tempY = np.vstack((y_train, arr_pred))
y_train = tempY.copy()
y_test_pred = np.array(onestep_pred).reshape(-1, 1)
# directory for saving model
if os.path.exists(MODELDIR):
pass
else:
os.makedirs(MODELDIR)
model.save(os.path.join(MODELDIR, 'cnn-v2.h5'))
model.save_weights(os.path.join(MODELDIR, 'cnn-v2-weights.h5'))
end = time.time()
time_taken = np.round((end-start), 3)
print(f'Time taken to complete the process: {time_taken} seconds')
return history, y_train_pred, y_test_pred
model = model_construction()
history, y_train_pred, y_test_pred = traning(model, X_train, y_train, MODELDIR)
###Output
WARNING:tensorflow:From /Users/manish/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py:497: calling conv1d (from tensorflow.python.ops.nn_ops) with data_format=NHWC is deprecated and will be removed in a future version.
Instructions for updating:
`NHWC` for data_format is deprecated, use `NWC` instead
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d_1 (Conv1D) (None, 1, 8) 248
_________________________________________________________________
conv1d_2 (Conv1D) (None, 1, 16) 400
_________________________________________________________________
conv1d_3 (Conv1D) (None, 1, 16) 784
_________________________________________________________________
flatten_1 (Flatten) (None, 16) 0
_________________________________________________________________
dense_1 (Dense) (None, 16) 272
_________________________________________________________________
dense_2 (Dense) (None, 1) 17
=================================================================
Total params: 1,721
Trainable params: 1,721
Non-trainable params: 0
_________________________________________________________________
Time taken to complete the process: 23.001 seconds
###Markdown
5. Model evaluation
###Code
def model_evaluation(y_train, y_test, y_train_pred, y_test_pred):
y_train_inv = y_scaler.inverse_transform(y_train)
y_test_inv = y_scaler.inverse_transform(y_test)
y_train_pred_inv = y_scaler.inverse_transform(y_train_pred)
y_test_pred_inv = y_scaler.inverse_transform(y_test_pred)
# MAE and NRMSE calculation
train_rmse = np.sqrt(mean_squared_error(y_train_inv, y_train_pred_inv))
train_mae = np.round(mean_absolute_error(y_train_inv, y_train_pred_inv), 3)
train_nrmse = np.round(train_rmse/np.std(y_train_inv), 3)
test_rmse = np.sqrt(mean_squared_error(y_test_inv, y_test_pred_inv))
test_mae = np.round(mean_absolute_error(y_test_inv, y_test_pred_inv), 3)
test_nrmse = np.round(test_rmse/np.std(y_test_inv), 3)
print(f'Training MAE: {train_mae}')
print(f'Training NRMSE: {train_nrmse}')
print(f'Test MAE: {test_mae}')
print(f'Test NRMSE: {test_nrmse}')
return y_train_inv, y_train_pred_inv, y_test_inv, y_test_pred_inv
y_train_inv, y_train_pred_inv, y_test_inv, y_test_pred_inv = model_evaluation(y_train, y_test, y_train_pred, y_test_pred)
plt.plot(y_train_inv, label='actual')
plt.plot(y_train_pred_inv, label='predicted')
plt.ylabel('kWatt')
plt.xlabel('index')
plt.title('Actual vs predicted on training data using CNN', fontsize=14)
plt.legend()
plt.tight_layout()
plt.show()
plt.plot(y_test_inv, label='actual')
plt.plot(y_test_pred_inv, label='predicted')
plt.ylabel('kWatt')
plt.xlabel('index')
plt.title('Actual vs predicted on test data using recursive CNN', fontsize=14)
plt.legend()
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
notebooks/lecture_06_oop.ipynb | ###Markdown
Table of ContentsUrban Informatics and VisualizationBeyond Expressions, Procedures, and Scripts2.1 Abstraction2.1.1 Package > Module > Class > Method2.1.2 Namespaces and Scopes2.2 Functions2.2.1 return values2.2.2 A more complex examplePractice:2.2.3 "Lambda" Functions2.2.4 The map() function2.3 class objects2.3.1 Instantiate a class2.3.2 Class attributes2.4 Class methods2.4.1 the __init__() method2.4.2 Defining your own class methods2.4.3 Know thy self CYPLAN255 Urban Informatics and Visualization HIT RECORD and TRANSCRIBE Lecture 06 -- Object-Oriented Programming Beyond Expressions, Procedures, and Scripts*******February 9, 2022 Agenda1. Announcements2. Notebook "demo"3. For next time4. Questions 1. Announcements 1. Updated GitHub cheat sheet2. Assignment 1: Due3. Assignment 2: Released tonight 2. Object-Oriented Programming So far you have learned the logic of programming using what is referred to as procedure-oriented programming. Python lets you do this easily. But as the programs you write get to be more complex, modern programming guidance is to use what is called an object-oriented programming (OOP) style. While procedural programming can suffice for writing short, simple programs, an object-oriented approach becomes increasingly valuable as your program grows in size and complexity. The more data and functions your code contains, the more important it is to arrange them into logical subgroups (classes), making sure that 1) related data and functions are grouped together and 2) unrelated data and functions do _not_ interfere with each other. Modular code is easier to read, modify, and reuse – and code reuse is valuable because it reduces development time.
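As a loose sketch of this contrast (the names below are made up for illustration and differ from the employee class we will build later in this lecture):
```
# procedure-oriented: data and the functions that act on it are kept separate
employee = {'name': 'Ada', 'pay': 100}

def give_raise(emp, pct):
    emp['pay'] = emp['pay'] * (1 + pct)

# object-oriented: related data (attributes) and behavior (methods) live together in a class
class RaisableEmployee:
    def __init__(self, name, pay):
        self.name = name
        self.pay = pay

    def give_raise(self, pct):
        self.pay = self.pay * (1 + pct)
```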
###Code
type(print)
###Output
_____no_output_____
###Markdown
2.1 AbstractionThe key to object-oriented programming is a concept called **abstraction**.Just like we've defined **variables** to make it easier to reuse data, we can also define **Functions** which instead of data store statements, expressions, conditional logic, loops, etc. This makes it easier to apply the same _procedures_ to different data. We call this "abstraction" because a function (e.g. `print()`) is a generic _representation_ of a much more complex procedure which the user doesn't necessarily know or need to know about. By defining your procedure as a Function, you not only protect the user from having to worry about the details of that procedure, but you also prevent them from altering it. 2.1.1 Package > Module > Class > MethodSimilarly, we'll define **class** objects, which organize groups of related functions and variables together. Not all functions belong to classes, but when they do we call them **methods** of that class. Variables that are stored in a class are called **attributes** of that class.Going even further, we might want to organize a collection of classes, Functions, and variables into a **module**. Modules are stored on disk as their own .py files, and can be imported by other Python modules, scripts, or interactive sessions (e.g. Notebooks). Lastly we have Python **libraries** or **packages**, which are basically just a collection of **modules** with some additional instructions for telling Python how to install it (see the short sketch at the end of this cell). 2.1.2 Namespaces and ScopesWe have previously seen that `dir()` is a helpful function which, if we do not provide an argument, will print out the list of all of the names of the objects which are defined in our **namespace**.
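Here is the small sketch referenced above of the Package > Module > Class > Method layering (the file name and its contents are hypothetical):
```
# geometry.py -- a hypothetical module saved as its own .py file
PI = 3.14159                      # a module-level variable

def describe(shape):              # a module-level function
    return 'This is a ' + shape.name

class Circle:                     # a class groups related attributes and methods
    def __init__(self, radius):
        self.radius = radius      # an attribute
        self.name = 'circle'

    def area(self):               # a method
        return PI * self.radius ** 2
```
A package would then be a folder of such modules plus the instructions Python needs to install it.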
###Code
a = 1
b = 2
dir()
###Output
_____no_output_____
###Markdown
A namespace is like a dictionary which maps names of objects to the objects themselves. Every module, method, or class object that we define will have its own namespace, called a **local** namespace. The local namespace can be interrogated with the built-in `locals()` function.
###Code
locals()
###Output
_____no_output_____
###Markdown
This looks a lot like what we saw when we ran the `dir()` command. That's because when you run `dir()`, you are basically seeing the keys of the `locals` dict. Each namespace has something called a **scope** which defines the region of a Python program from which that namespace is accessible.These concepts are related to abstraction because objects that are encapsulated by other objects automatically inherit the namespaces of their encapsulating objects. Another way of saying this is that the scope of an object's namespace extends to all of the namespaces of objects that are defined within that object. For example, the namespace of a class has a scope which extends to all methods (functions) defined in that class. The opposite is not true, however. If a variable is defined within a method, the encapsulating class object does not have access to that variable. There is also a **global** namespace which defines the namespace that is not encapsulated by any other. Any object in the global namespace is accessible from any other namespace in the program. When our Python interpreter is executing code from an interactive session (like this one) rather than a class or function, its local namespace is also the global namespace. Try for yourself:
###Code
locals() == globals()
###Output
_____no_output_____
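To make the scope rules above concrete, here is a small sketch (the names are hypothetical):
```
x = 'global'              # defined in the global namespace

def show_scope():
    y = 'local'           # defined only in the function's local namespace
    print(x, y)           # the function can see the enclosing global name x

show_scope()              # prints: global local
print(y)                  # raises NameError: y only exists inside show_scope
```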
###Markdown
This will all make more sense once you see how classes and functions are defined. 2.2 FunctionsYou can group programming steps into functions to be able to reuse them easily and flexibly, on different inputs.Note the syntax below. A function definition begins with the word `def`. It then has a name, and then parentheses containing one or more elements called **arguments**. Arguments define generic, descriptive, placeholder names for the values that you intend to pass to the function at runtime. Notice also the indentation of the block of code defining the procedure.```def function_name(args): ```The general syntax for a function looks a bit like the syntax for an `if` statement. Recall the example of a chained conditional we covered last time:
###Code
x = 11
if x < 10:
print('x is less than 10')
elif x == 10:
print('x equals 10')
else:
print('x is greater than 10')
###Output
_____no_output_____
###Markdown
![](images/loss.jpg) Let's try encapsulating these statements with a function
###Code
def compare_to_10(value):
if value < 10:
print(value, 'is less than 10')
elif value == 10:
print(value, 'equals 10')
else:
print(value, 'is greater than 10')
###Output
_____no_output_____
###Markdown
A few things to notice here:- Running the cell above does not produce any output. It just defines the function and adds it to our **namespace**, which makes it available to call later on.- Function names should always be **verbs** which describe what the function does (e.g. `do_this()`, `get_that()`, etc.). They should also always be all lower case, with underscores separating words as needed for legibility.- There is nothing special about the word `value` above. It's a placeholder for whatever argument we will eventually pass into the function when we're ready to use it. It defines a variable that does not yet have a value, and will take on the value of whatever argument we pass into the function when we call it. It will only be defined in the _local_ namespace of the function. It works kind of like `item` in the following for loop:```for item in list: print(item)``` To call a function, just use its name as if it were a built in function and use parentheses to pass the function a value or set of values as **arguments**. Just pay attention - the function doesn't exist until you initialize it by running the code that defines it. Let's try calling the function now
###Code
compare_to_10(9)
###Output
_____no_output_____
###Markdown
The above approach to calling the method isn't that different than the original use case. But now we can call the function from a for loop, and this is where you begin to see the value of functions in automating a process.
###Code
for i in range(20):
compare_to_10(i)
###Output
_____no_output_____
###Markdown
![](images/podracing.gif) 2.2.1 `return` valuesYour function can produce output data that you can use from wherever you've called that function.Note the use of `return` here. `return` not only tells your function which results to return, but it also send a signal to your function that the function is "done". Kind of like `break` in a loop.
###Code
def greater_than(x, y):
if x > y:
return True
else:
return False
greater_than('A', 'B')
###Output
_____no_output_____
###Markdown
One of the most practical uses of using return is that you can assign the result to a variable and continue working with it.
###Code
z = greater_than(3, 5)
z
###Output
_____no_output_____
###Markdown
2.2.2 A more complex exampleHere is a more complex function that calculates a Fibonacci series up to $n$. Fibonacci series have the property that the sum of two adjacent numbers in the list equals the next value in the list. Figuring out how to write functions to solve a problem requires analyzing the problem and, if it is complicated, breaking it down into smaller steps. In this case, to create a Fibonacci series we should:1. Initialize two variables with the first two values, 0 and 1; 2. create a while loop to iterate over a sequence of values up to $n$; 3. at each iteration, assign the second value to the first variable and assign the sum of the two to the second variable. Note that when a function does not explicitly return anything, Python will return `None`. This is equivalent to the following:
###Code
def create_fibonacci(n):
a, b = 0, 1 # use commas to assign multiple variables in one line
while a < n:
print(a, end=' ')
a, b = b, a + b
return
create_fibonacci(1000)
###Output
_____no_output_____
###Markdown
We can add documentation to functions by adding a statement in triple quotation marks following the `def` statement. These are called **docstrings**, which Python can use to generate documentation for a function.
###Code
def create_fibonacci(n):
"""Print a Fibonacci series up to n, where each element
is the sum of the preceding two elements in the sequence.
"""
a, b = 0, 1
print(list(locals().keys()))
while a < n:
print(a, end=' ')
a, b = b, a + b
create_fibonacci(1000)
###Output
_____no_output_____
###Markdown
Let's modify the function to create a list, return it, and assign it to a new variable.
###Code
def create_fibonacci(n):
"""Print a Fibonacci series up to n, where each element
is the sum of the preceding two elements in the sequence.
"""
result = []
a, b = 0, 1
print(list(locals().keys()))
while a < n:
result.append(a)
a, b = b, a + b
return result
f = create_fibonacci(1000)
# print the docstring for a function
print(create_fibonacci.__doc__)
help(create_fibonacci)
###Output
_____no_output_____
###Markdown
Practice:Write a Python function named `countdown()` that:1. accepts an integer as an argument2. prints that integer and counts down to zero from there.Test it by passing it a value of 9. 2.2.3 "Lambda" Functions One way to write small functions in a compact way that avoids the `def` statement is to use the **lambda** function. Lambda takes a number of parameters and an expression combining these parameters, and creates an anonymous function that returns the value of the expression. Lambda functions come in very handy when operating with lists or other iterables. These functions are defined by the keyword lambda followed by the variables, a colon and the respective expression.
###Code
multiply = lambda x: x * x
multiply(7)
###Output
_____no_output_____
###Markdown
The lambda function above is equivalent to the following code:
###Code
def multiply(x):
result = x * x
return result
multiply(7)
###Output
_____no_output_____
###Markdown
Here's an example lambda function which takes two arguments
###Code
add = lambda x, y: x + y
add(3, 4)
###Output
_____no_output_____
###Markdown
This is just an alternative to using a `def` statement and defining the function in the usual way.
###Code
def add(x, y):
result = x + y
return result
###Output
_____no_output_____
###Markdown
Here is an example of embedding a boolean test in a lambda
###Code
check_even = lambda x: x % 2 == 0
check_even(9)
###Output
_____no_output_____
###Markdown
2.2.4 The `map()` function`map()` is a function which evaluates a function on each item of a sequence. To achieve this, it takes another function as an argument.
###Code
ls = list(range(2, 10))
list(map(str, ls))
str(ls)
###Output
_____no_output_____
###Markdown
When combined with lambda functions, `map()` can make for some very powerful one-liners
###Code
list(map(lambda x: x * x, list(range(2, 10))))
###Output
_____no_output_____
###Markdown
Notice that without `list()`, map returns a `map` object, similar to how `range()` produces a `range` object
###Code
eg = map(lambda x: x + 2, ls)
print(ls)
print(eg)
print(list(eg))
ls = list(range(10))
eg3 = map(lambda x: x % 2 == 0, ls)
print(ls)
print(list(eg3))
###Output
_____no_output_____
###Markdown
2.3 `class` objects Let's create a simple class for UC Berkeley employees.
###Code
class Employee:
pass # pass is a placeholder statement that does nothing, letting us leave the class body empty for now
###Output
_____no_output_____
###Markdown
Above, a class object named "Employee" is declared. It kind of looks like a function definition except instead of `def` you use `class`, and you don't return anything. Think of a `class` like a blueprint or a recipe for creating objects that have a predefined set of attributes and methods for manipulating those attributes. You can use that blueprint to create lots of different versions or **instances** of that class. 2.3.1 Instantiate a class Each unique employee that we create using our "Employee" class will be an instance of that class. For instance, employees 1 and 2. To create an instance all you need to do is:
###Code
emp_1 = Employee()
emp_2 = Employee()
###Output
_____no_output_____
###Markdown
If we print these two instances, you will see both of them are Employee objects which are unique -- they each have different locations in the computer's memory.
###Code
print(emp_1)
print(type(emp_1))
###Output
_____no_output_____
###Markdown
Knowing the difference between a class and an instance of that class is important. Its like the difference between a blueprint for a building and an actual building constructed from that blueprint. 2.3.2 Class attributes Variables that are stored within class objects are called **attributes**. Let's add some for our employees:
###Code
emp_1.first = 'John'
emp_1.last= 'Smith'
emp_1.email= '{0}.{1}@berkeley.edu'.format(emp_1.first, emp_1.last)
emp_1.pay= 85000
emp_2.first = 'Jane'
emp_2.last= 'Doe'
emp_2.email= '{0}.{1}@berkeley.edu'.format(emp_2.first, emp_2.last)
emp_2.pay= 20000
###Output
_____no_output_____
###Markdown
Now let's print out the email address for employee 2
###Code
print(emp_2.email)
###Output
_____no_output_____
###Markdown
2.4 Class methods First, a reminder. Methods are just functions of a class. They are constructed the exact same way, with one minor difference: ```def print_hello(self): print("Hello World!")``` The first argument of a method is always the instance object itself. For the sake of convention, we usually name this argument `self`, but it can really be anything. This argument never needs to be specified when the method itself is called. It's just there. Ignore it except when you are defining a method. 2.4.1 the `__init__()` method Back to our employee class. What should we do if we want to create many employee instances? To do this manually it would require writing a lot of code, and we'd probably eventually make a mistake if we re-write it every time.To make this **instantiation** easier we use the `__init__()` method, short for "initialize". It will define an _initial_ state for all instances of this class. As soon as a class instance is instantiated, the code inside of `__init__()` will be run.
###Code
class Employee:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = '{0}.{1}@berkeley.edu'.format(first, last)
###Output
_____no_output_____
###Markdown
Now we can instantiate `Employee` type objects with attributes right from the start:
###Code
emp_1 = Employee('John', 'Smith', 83000)
emp_2 = Employee('Jane', 'Doe', 20000)
###Output
_____no_output_____
###Markdown
And once you have instantiated an object, you can call it by name and access its attributes:
###Code
print("{0} {1}: ${2}".format(emp_1.first, emp_1.last, emp_1.pay))
###Output
_____no_output_____
###Markdown
2.4.2 Defining your own class methods That's a lot to type each time we want to display the full name of an employee. To make it easier, we can add a **method**. In this case, the instance object is the only argument we need:
###Code
class Employee:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = '{0}.{1}@berkeley.edu'.format(first, last)
def get_full_name(self):
"""my method
"""
return '{0} {1}'.format(self.first, self.last)
emp_1 = Employee('John', 'Smith', 83000)
emp_2 = Employee('Jane', 'Doe', 20000)
emp_1.get_full_name()
###Output
_____no_output_____
###Markdown
Even though `get_full_name()` doesn't take any arguments, we still need to use `()` to let Python know that it's a function. Let's see what we would get if we print the above code without ( ).
###Code
print(emp_1.get_full_name)
###Output
_____no_output_____
###Markdown
Now let's practice adding more "functionality" to our class. For instance, all staff are supposed to get a 2% raise each year. But the union is renegotiating contracts, so it could be more! We want to add a method to calculate the salary after the raise.
###Code
class Employee:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = '{0}.{1}@berkeley.edu'.format(first, last)
def get_full_name(self):
return '{0} {1}'.format(self.first, self.last)
def get_new_salary(self, pct_raise=0.02):
return round(self.pay * (1 + pct_raise))
emp_1 = Employee('John', 'Smith', 83000)
emp_2 = Employee('Jane', 'Doe', 20000)
print(emp_1.get_new_salary(.50))
###Output
_____no_output_____
###Markdown
2.4.3 Know thy `self`Any of these methods can be called from the class itself rather than a class instance. When we do that, we pass the instance object in as the first argument:
###Code
print(emp_1.get_new_salary())
print(Employee.get_new_salary(emp_1))
###Output
_____no_output_____
###Markdown
That's why we write `self` when we define methods, and also why we don't need it when we call the method from the class instance object. One common mistake in creating a method is forgetting to define the `self` argument. Let's take a quick look at our code to see what that would look like if we forgot our `self`s:
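For illustration, here is a minimal hypothetical sketch of that mistake (this is not our Employee class):
```
class Greeter:
    def say_hello():              # oops: no self argument
        print('Hello!')

g = Greeter()
g.say_hello()                     # raises TypeError, because Python automatically
                                  # passes the instance g as the first argument
```
The cell below, by contrast, keeps `self` in place and works as expected.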
###Code
emp_1 = Employee('John', 'Smith', 83000)
emp_2 = Employee('Jane', 'Doe', 20000)
print(emp_1.get_new_salary())
###Output
_____no_output_____ |
tensorflow/TensorFlow_Exercises/2_LogisticRegression_MNIST_160516.ipynb | ###Markdown
\* *[Notice] I wrote this code while following the examples in [Choi's Tensorflow-101 tutorial](https://github.com/sjchoi86/Tensorflow-101). And, as far as I know, most of Choi's examples originally come from [Aymeric Damien's](https://github.com/aymericdamien/TensorFlow-Examples/) and [Nathan Lintz's](https://github.com/nlintz/TensorFlow-Tutorials) tutorials.* 2. Logistic Regression with MNIST data
###Code
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
#%matplotlib inline
###Output
_____no_output_____
###Markdown
Load MNIST data
###Code
mnist = input_data.read_data_sets('data', one_hot=True)
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
dimX = X_train.shape[1]
dimY = Y_train.shape[1]
nTrain = X_train.shape[0]
nTest = X_test.shape[0]
print ("Shape of (X_train, X_test, Y_train, Y_test)")
print (X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
###Output
Shape of (X_train, X_test, Y_train, Y_test)
((55000, 784), (10000, 784), (55000, 10), (10000, 10))
###Markdown
Plot an example image of MNIST data
###Code
myIdx = 36436 # any number
img = np.reshape(X_train[myIdx, :], (28, 28)) # 28 * 28 = 784
plt.matshow(img, cmap=plt.get_cmap('gray'))
plt.show()
###Output
_____no_output_____
###Markdown
Write a TF graph
###Code
X = tf.placeholder(tf.float32, [None, dimX], name="input")
Y= tf.placeholder(tf.float32, [None, dimY], name="output")
W = tf.Variable(tf.zeros([dimX, dimY]), name="weight")
b = tf.Variable(tf.zeros([dimY]), name="bias")
###Output
_____no_output_____
###Markdown
The output of the logistic regression is $\mathrm{softmax}(Wx+b)$. Note that the dimension of *Y_pred* is *(nBatch, dimY)*.
###Code
Y_pred = tf.nn.softmax(tf.matmul(X, W) + b)
###Output
_____no_output_____
###Markdown
We use a cross-entropy loss function, $loss = -\sum_i y'_i \log(y_i)$, where $y'$ is the true one-hot label and $y$ is the predicted probability. *reduce_sum(X, 1)* returns the sum across the columns of the tensor *X* (for example, reduce_sum([[1, 2], [3, 4]], 1) gives [3, 7]), and *reduce_mean(X)* returns the mean value over all elements of the tensor *X*
###Code
loss = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(Y_pred), reduction_indices=1))
learning_rate = 0.005
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
training_epochs = 50
display_epoch = 5
batch_size = 100 # For each time, we will use 100 samples to update parameters
###Output
_____no_output_____
###Markdown
Compare prediction with the true value *argmax(X,1)* returns the index of the maximum value (which represents the label in this example) across the columns of the tensor *X*
###Code
correct_prediction = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
###Output
_____no_output_____
###Markdown
Run the session We use *with* to open a TF session
###Code
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(training_epochs):
nBatch = int(nTrain/batch_size)
myIdx = np.random.permutation(nTrain)
for ii in range(nBatch):
X_batch = X_train[myIdx[ii*batch_size:(ii+1)*batch_size],:]
Y_batch = Y_train[myIdx[ii*batch_size:(ii+1)*batch_size],:]
sess.run(optimizer, feed_dict={X:X_batch, Y:Y_batch})
if (epoch+1) % display_epoch == 0:
loss_temp = sess.run(loss, feed_dict={X: X_train, Y:Y_train})
accuracy_temp = accuracy.eval({X: X_train, Y:Y_train})
print "(epoch {})".format(epoch+1)
print "[Loss / Training Accuracy] {:05.4f} / {:05.4f}".format(loss_temp, accuracy_temp)
print " "
print "[Test Accuracy] ", accuracy.eval({X: X_test, Y: Y_test})
###Output
(epoch 5)
[Loss / Accuracy] 0.5503 / 0.8677
(epoch 10)
[Loss / Accuracy] 0.4523 / 0.8830
(epoch 15)
[Loss / Accuracy] 0.4115 / 0.8907
(epoch 20)
[Loss / Accuracy] 0.3879 / 0.8957
(epoch 25)
[Loss / Accuracy] 0.3719 / 0.8988
(epoch 30)
[Loss / Accuracy] 0.3601 / 0.9015
(epoch 35)
[Loss / Accuracy] 0.3509 / 0.9033
(epoch 40)
[Loss / Accuracy] 0.3436 / 0.9049
(epoch 45)
[Loss / Accuracy] 0.3374 / 0.9065
(epoch 50)
[Loss / Accuracy] 0.3323 / 0.9078
[Test Accuracy] 0.9143
|
jupyter_notebooks/pandas/mastering_data_analysis/03. Essential Series Commands/05. String Series Methods.ipynb | ###Markdown
String Series MethodsThe previous chapters in this part focused mainly on Series that contained numeric values. In this chapter, we focus on methods that work for Series containing string data. Columns of strings are processed quite differently than columns of numeric values. Remember, there is no string data type in pandas. Instead there is the **object** data type which may contain any Python object. The majority of the time, object columns are entirely composed of strings. Let's begin by reading in the employee dataset and selecting the `dept` column as a Series.
###Code
import pandas as pd
emp = pd.read_csv('../data/employee.csv')
dept = emp['dept']
dept.head(3)
###Output
_____no_output_____
###Markdown
Attempt to take the meanSeveral methods that worked on numeric columns will either not work with strings or provide little value. For instance, the `mean` method raises an error when attempted on a string column.
###Code
dept.mean()
###Output
_____no_output_____
###Markdown
Other methods do workMany of the other methods we covered from the previous chapters in this part work with string columns such as finding the maximum department. The `max` of a string is based on its alphabetical ordering.
###Code
dept.max()
###Output
_____no_output_____
###Markdown
Missing valuesMany other methods work with string columns identically as they do with numeric columns. Below, we calculate the number of missing values. Object data type Series can contain any of three missing value representations. The numpy `NaN` and `NaT` and Python `None` are all counted as missing.
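As a tiny illustration (the values below are made up):
```
import numpy as np
import pandas as pd

s = pd.Series(['HPD', None, np.nan, pd.NaT], dtype='object')
s.isna()   # False, True, True, True -- all three missing-value markers are detected
```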
###Code
dept.isna().sum()
###Output
_____no_output_____
###Markdown
The `value_counts` methodThe `value_counts` method is one of the most valuable methods for string columns. It returns the count of each unique value in the Series and sorts it from most to least common.
###Code
dept.value_counts()
###Output
_____no_output_____
###Markdown
Notice what object is returnedThe `value_counts` method returns a Series with the unique values as the index and the count as the new values. Use `normalize=True` for relative frequencyWe can use `value_counts` to return the relative frequency (proportion) of each occurrence instead of the raw count by setting the parameter `normalize` to `True`. For instance, this tells us that 39% of the employees are members of the police department.
###Code
dept.value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
`value_counts` works for columns of all data typesThe `value_counts` method works for columns of all data types and not just strings. It's just usually more informative for string columns. Let's use it on the salary column to see if we have common salaries.
###Code
emp['salary'].value_counts().head(3)
###Output
_____no_output_____
###Markdown
Special methods just for object columnspandas provides a collection of methods only available to object columns. These methods are not directly available using dot notation from the DataFrame variable name and you will not be able to find them as you normally do.To access these special string-only methods, first append the Series variable name with `.str` followed by another dot and then the specific string method. pandas refers to this as the `str` accessor. Think of the term 'accessor' as giving the Series access to more specific specialized string methods. [Visit the official documentation][1] to take a look at the several dozen string-only methods available with the `str` accessor.Let's use the title column for these string-only methods.
###Code
title = emp['title']
title.head()
###Output
_____no_output_____
###Markdown
Make each value lowercaseLet's begin by calling a simple string method to make each value in the `title` Series lowercase. We will use the `lower` method of the `str` accessor.[1]: https://pandas.pydata.org/pandas-docs/stable/reference/series.html#string-handling
###Code
title.str.lower().head()
###Output
_____no_output_____
###Markdown
Lots of methods but mostly easy to useThere is quite a lot of functionality to manipulate and probe strings in almost any way you can imagine. We will not cover every single method possible, but instead, walk through examples of some of the more common ones such as the ones that follow here:* `count` - Returns the number of non-overlapping occurrences of the passed string.* `contains` - Checks to see whether each string contains the given string. Returns a boolean Series* `len` - Returns the number of characters in each string* `split` - Splits the string into multiple strings by a given separator* `replace` - Replaces parts of a string with other characters The `count` str methodThe `count` method returns the number of non-overlapping occurrences of the passed string. Here, we count the number of times the uppercase 'O' character appears in each string.
###Code
title.str.count('O').head()
###Output
_____no_output_____
###Markdown
You are not limited to single characters. Here we count the number of times 'ER' appears in each string.
###Code
title.str.count('ER').head()
###Output
_____no_output_____
###Markdown
The `contains` str methodThe `contains` method returns a boolean Series indicating whether or not the passed string is contained somewhere within each string. Let's determine whether any titles contain the letter 'Z'.
###Code
title.str.contains('Z').head(3)
###Output
_____no_output_____
###Markdown
We can then sum this boolean Series to find the number of employees that have a title containing a 'Z'.
###Code
title.str.contains('Z').sum()
###Output
_____no_output_____
###Markdown
Let's find out which employees have the word 'POLICE' somewhere in their title.
###Code
title.str.contains('POLICE').head()
###Output
_____no_output_____
###Markdown
Summing this Series reveals the number of employees that have the word 'POLICE' somewhere in their title.
###Code
title.str.contains('POLICE').sum()
###Output
_____no_output_____
###Markdown
The `len` str methodThe `len` string method returns the length of every string. Take note that this is completely different and unrelated to the `len` built-in function which returns the number of elements in a Series.
###Code
title.str.len().head()
###Output
_____no_output_____
###Markdown
The `split` str methodThe `split` method splits each string into multiple separate strings based on a given separator. The default separator is a single space. The following splits on each space and returns a Series of lists.
###Code
title.str.split().head(3)
###Output
_____no_output_____
###Markdown
Set the `expand` parameter to `True` to return a DataFrame:
###Code
title.str.split(expand=True).head(3)
###Output
_____no_output_____
###Markdown
Here, we split on the string 'AN'. Note that the string used for splitting is removed and not contained in the result.
###Code
title.str.split('AN', expand=True).head(3)
###Output
_____no_output_____
###Markdown
The `replace` str methodThe `replace` string method allows you to replace one section of the string (a substring) with some other string. You must pass two string arguments to `replace` - the string you want to replace and its replacement value. Here, we replace 'SENIOR' with 'SR.'.
###Code
title.str.replace('SENIOR', 'SR.').head(3)
###Output
_____no_output_____
###Markdown
Selecting substrings with the bracketsSelecting one or more characters of a regular Python string is simple and accomplished by using either an integer or slice notation within the brackets. Let's review this concept now.
###Code
some_string = 'The Astros will win the world series in 2019'
###Output
_____no_output_____
###Markdown
Select the character at integer location 5.
###Code
some_string[5]
###Output
_____no_output_____
###Markdown
Select the 24th to 36th characters with slice notation.
###Code
some_string[24:36]
###Output
_____no_output_____
###Markdown
You can use the same square brackets appended to the `str` accessor to select one or more characters from every string in a Series. Let's begin by selecting the character at position 10.
###Code
title.str[10].head(3)
###Output
_____no_output_____
###Markdown
In the following example, we use slice notation to select the last five characters of each string.
###Code
title.str[-5:].head(3)
###Output
_____no_output_____
###Markdown
Slice notation is used again to select from the 5th to 15th character.
###Code
title.str[5:15].head()
###Output
_____no_output_____
###Markdown
Many more string-only methodsThere are many more string-only methods that were not covered in this chapter and I would encourage you to explore them on your own. Many of them overlap with the built-in Python string methods. Regular expressionsRegular expressions help match patterns within text. Many of the string methods presented above accept regular expressions as inputs for more advanced string maneuvering (a short illustration appears at the end of this notebook). They are an important part of doing data analysis and are covered thoroughly in their own part of this book. ExercisesRead in the movie dataset assigning the actor1 column to a variable name as a Series by executing the cell below. All missing values have been dropped from this Series. Use this Series for the exercises below.
###Code
movie = pd.read_csv('../data/movie.csv', index_col='title')
actor1 = movie['actor1'].dropna()
actor1.head(3)
###Output
_____no_output_____ |
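As noted above, many of the `str` methods also accept regular expressions. A quick illustration, reusing the `title` Series from earlier in this chapter (the pattern is arbitrary and just for demonstration):
```
# regex alternation: count titles that contain either 'POLICE' or 'FIRE'
title.str.contains('POLICE|FIRE', regex=True).sum()
```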
.ipynb_checkpoints/Structural Dynamics-checkpoint.ipynb | ###Markdown
Computation for the EigenValues of a 3DOF System in Structural DynamicsIn this example, we will look at how to solve a 3DOF system using Python. This uses the SymPy (Symbolic Python) library to solve the equations in terms of variables (letters): $$ m\ddot{x} + c\dot{x} + kx = 0 $$ Step 1: Loading libraries
###Code
# imports numpy and matplotlib
%pylab inline
# imports sympy
import sympy as sm
# prints output using LaTeX for pretty viewing
sm.init_printing(use_latex=True)
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Step 2: Establish Mass Matrix
###Code
# assigning masses
m1 = 1000 #(kg)
m2 = 1500 #(kg)
m3 = 2000 #(kg)
# making the mass matrix
m = sm.Matrix([[m1,0,0],[0,m2,0],[0,0,m3]])
# printing the mass matrix
m
###Output
_____no_output_____
###Markdown
Step 3: Establish Stiffness Matrix
###Code
# computing for the stiffnesses
k1 = 600000 #(N/m)
k2 = 1200000 #(N/m)
k3 = 1800000 #(N/m)
# making the stiffness matrix
k = sm.Matrix([[k1,-k1,0],[-k1,k1+k2,-k2],[0,-k2,k2+k3]])
# printing the stiffness matrix
k
###Output
_____no_output_____
###Markdown
Step 4: Solving the EigenValue ProblemMaking the Matrix
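The cell below forms the matrix $A = K - \omega^2 M$, so the eigenvalues $\omega^2$ come from the characteristic equation $$ \det(K - \omega^2 M) = 0, $$ and the natural periods then follow as $T = 2\pi/\omega$ (this is what the final step computes).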
###Code
# assigning the "x" variable as a symbol
x = sm.Symbol('x')
# making the Eigen matrix
A = k-m*x**2
# printing the matrix
A
###Output
_____no_output_____
###Markdown
Getting the Determinant of the Matrix
###Code
sm.det(A).simplify()
###Output
_____no_output_____
###Markdown
Solving the determinant and listing the natural periods of the modes
###Code
# Solve the characteristic equation det(A) = 0 for x**2 (the eigenvalues, omega^2)
B = sm.solve(sm.det(A),x**2)
# Take the real part of the solutions as a numpy array
C = real(B)
# Cast the values to complex so the square root below is always defined
D = array((C.astype(complex)))
# Convert each omega^2 into a natural period T = 2*pi/omega (in seconds)
E = 1/(sqrt(D.real) / 2 / math.pi)
# printing the Modes
print("Period 1 = {0}, Period 2 = {1}, Period 3 = {2}".format(E[0],E[1],E[2]))
###Output
Period 1 = 0.2023720283163326, Period 2 = 0.43267656159437806, Period 3 = 0.13629624070103266
|
notebooks/Using OpenCV/ip-camera-streaming-into-opencv.ipynb | ###Markdown
IP Camera Streaming into OpenCVAs getting vision from an IP camera into OpenCV is an unnecessarily tricky stumbling block, we’ll only concentrate on the code that streams vision from an IP camera to OpenCV, which then simply displays that stream.
###Code
import numpy as np
import cv2
import time
import requests
import threading
from threading import Thread, Event, ThreadError
class Cam():
def __init__(self, url):
self.stream = requests.get(url, stream=True)
self.thread_cancelled = False
self.thread = Thread(target=self.run)
print("camera initialised")
def start(self):
self.thread.start()
print("camera stream started")
def run(self):
bytes=''
while not self.thread_cancelled:
try:
bytes+=self.stream.raw.read(1024)
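# '\xff\xd8' and '\xff\xd9' are the JPEG start-of-image and end-of-image markers;
# once both appear in the buffer, the bytes between them form one complete frame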
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a!=-1 and b!=-1:
jpg = bytes[a:b+2]
bytes= bytes[b+2:]
img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
cv2.imshow('cam',img)
if cv2.waitKey(1) ==27:
exit(0)
except ThreadError:
self.thread_cancelled = True
def is_running(self):
return self.thread.isAlive()
def shut_down(self):
self.thread_cancelled = True
#block while waiting for thread to terminate
while self.thread.isAlive():
time.sleep(1)
return True
if __name__ == "__main__":
url = 'http://192.168.2.1/?action=stream'
cam = Cam(url)
cam.start()
###Output
_____no_output_____ |
Covid-19 Data Analysis.ipynb | ###Markdown
Task: Covid-19 Data Analysis This notebook is used to practice and demonstrate data analysis techniques using the Pandas library. Import the necessary libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Question 1 Read the dataset
###Code
path = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/01-01-2021.csv'
df = pd.read_csv(path)
df.info()
df.head()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4005 entries, 0 to 4004
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 FIPS 3265 non-null float64
1 Admin2 3270 non-null object
2 Province_State 3830 non-null object
3 Country_Region 4005 non-null object
4 Last_Update 4005 non-null object
5 Lat 3917 non-null float64
6 Long_ 3917 non-null float64
7 Confirmed 4005 non-null int64
8 Deaths 4005 non-null int64
9 Recovered 4005 non-null int64
10 Active 4005 non-null int64
11 Combined_Key 4005 non-null object
12 Incident_Rate 3916 non-null float64
13 Case_Fatality_Ratio 3957 non-null float64
dtypes: float64(5), int64(4), object(5)
memory usage: 438.2+ KB
###Markdown
Display the top 5 rows in the data
###Code
df.head()
###Output
_____no_output_____
###Markdown
Show the information of the dataset
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4005 entries, 0 to 4004
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 FIPS 3265 non-null float64
1 Admin2 3270 non-null object
2 Province_State 3830 non-null object
3 Country_Region 4005 non-null object
4 Last_Update 4005 non-null object
5 Lat 3917 non-null float64
6 Long_ 3917 non-null float64
7 Confirmed 4005 non-null int64
8 Deaths 4005 non-null int64
9 Recovered 4005 non-null int64
10 Active 4005 non-null int64
11 Combined_Key 4005 non-null object
12 Incident_Rate 3916 non-null float64
13 Case_Fatality_Ratio 3957 non-null float64
dtypes: float64(5), int64(4), object(5)
memory usage: 438.2+ KB
###Markdown
Show the sum of missing values of features in the dataset
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Question 2 Show the number of Confirmed cases by Country
###Code
world = df.groupby("Country_Region")['Confirmed'].sum().reset_index()
world.head()
###Output
_____no_output_____
###Markdown
Show the number of Deaths by Country
###Code
world = df.groupby("Country_Region")['Deaths'].sum().reset_index()
world.head()
###Output
_____no_output_____
###Markdown
Show the number of Recovered cases by Country
###Code
world = df.groupby("Country_Region")['Recovered'].sum().reset_index()
world.head()
###Output
_____no_output_____
###Markdown
Show the number of Active Cases by Country
###Code
world = df.groupby("Country_Region")['Active'].sum().reset_index()
world.head()
###Output
_____no_output_____
###Markdown
Show the latest number of Confirmed, Deaths, Recovered and Active cases Country-wise
###Code
world = df.groupby("Country_Region")['Confirmed','Deaths','Recovered','Active'].sum().reset_index()
world.head()
###Output
C:\Users\JABS\AppData\Local\Temp/ipykernel_8804/4148972617.py:1: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.
world = df.groupby("Country_Region")['Confirmed','Deaths','Recovered','Active'].sum().reset_index()
###Markdown
Question 3 Show the countries with no recovered cases
###Code
world = df.groupby('Country_Region')['Recovered'].sum().reset_index()
result = world[world['Recovered']==0][['Country_Region','Recovered']]
print(result)
###Output
Country_Region Recovered
9 Australia 0
23 Belgium 0
24 Belgium 0
25 Belgium 0
26 Belgium 0
... ... ...
4000 Tonga 0
4001 Winter Olympics 2022 0
4002 Antarctica 0
4003 United Kingdom 0
4004 United Kingdom 0
[3402 rows x 2 columns]
###Markdown
Show the countries with no confirmed cases
###Code
world = df.groupby('Country_Region')['Confirmed'].sum().reset_index()
result = world[world['Confirmed']==0][['Country_Region','Confirmed']]
print(result)
###Output
Country_Region Confirmed
78 Canada 0
174 Colombia 0
265 India 0
280 India 0
414 Mexico 0
485 Peru 0
612 Spain 0
700 US 0
712 US 0
721 US 0
742 US 0
756 US 0
760 US 0
894 US 0
945 US 0
1215 US 0
1445 US 0
1656 US 0
1776 US 0
1822 US 0
1859 US 0
1890 US 0
2155 US 0
2335 US 0
2449 US 0
2558 US 0
2733 US 0
2818 US 0
2886 US 0
2937 US 0
3003 US 0
3092 US 0
3142 US 0
3208 US 0
3575 US 0
3718 US 0
3817 US 0
3887 US 0
3919 US 0
3955 United Kingdom 0
3965 United Kingdom 0
3978 China 0
3979 Kiribati 0
3980 Palau 0
3981 New Zealand 0
3982 Summer Olympics 2020 0
3999 Malaysia 0
4000 Tonga 0
4001 Winter Olympics 2022 0
4002 Antarctica 0
###Markdown
Show the countries with no deaths
###Code
world = df.groupby('Country_Region')['Deaths'].sum().reset_index()
result = world[world['Deaths']==0][['Country_Region','Deaths']]
print(result)
###Output
Empty DataFrame
Columns: [Country_Region, Deaths]
Index: []
###Markdown
Question 4 Show the Top 10 countries with Confirmed cases
###Code
world = df.groupby("Country_Region")['Confirmed'].sum().reset_index()
world.sort_values(by='Confirmed', ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Show the Top 10 Countries with Active cases
###Code
world = df.groupby("Country_Region")['Active'].sum().reset_index()
world.sort_values(by='Active', ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Question 5 Plot Country-wise Total deaths, confirmed, recovered and active casaes where total deaths have exceeded 50,000
###Code
df = df.groupby(["Country_Region"])[["Deaths", "Confirmed", "Recovered", "Active"]].sum().reset_index()
df = df.sort_values(by='Deaths', ascending=False)
df = df[df['Deaths']>50000]
plt.figure(figsize=(15, 5))
plt.plot(df['Country_Region'], df['Deaths'],color='red')
plt.plot(df['Country_Region'], df['Confirmed'],color='green')
plt.plot(df['Country_Region'], df['Recovered'], color='blue')
plt.plot(df['Country_Region'], df['Active'], color='black')
plt.title('Total Deaths(>50000), Confirmed, Recovered and Active Cases by Country')
plt.show()
###Output
C:\Users\JABS\AppData\Local\Temp/ipykernel_8804/3550498885.py:1: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.
df = df.groupby(["Country_Region"])["Deaths", "Confirmed", "Recovered", "Active"].sum().reset_index()
###Markdown
Question 6 Plot Province/State wise Deaths in USA
###Code
import plotly.express as px
import plotly.io as pio
# Rebuild province-level rows: df was aggregated by country above, so reload the daily report
us_data = pd.read_csv(path)
us_data = us_data[us_data['Country_Region']=='US']
df_death = us_data.groupby(['Province_State'])['Deaths'].sum().reset_index()
df_death = df_death[df_death['Deaths'] > 0]
state_fig = px.bar(df_death, x='Province_State', y='Deaths', title='State wise deaths reported of COVID-19 in USA', text='Deaths')
state_fig.show()
covid_data= pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/01-09-2021.csv')
covid_data.columns
###Output
_____no_output_____
###Markdown
Question 7 Plot Province/State Wise Active Cases in USA Question 8 Plot Province/State Wise Confirmed cases in USA Question 9 Plot Worldwide Confirmed Cases over time
###Code
import plotly.express as px
import plotly.io as pio
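# Sketch (added as an illustration, not part of the original notebook): possible answers for
# Questions 7-9. Assumes `covid_data` (the 01-09-2021 daily report loaded above) and plotly.
us_data = covid_data[covid_data['Country_Region'] == 'US']
# Question 7: Province/State wise Active cases in USA
active_by_state = us_data.groupby('Province_State')['Active'].sum().reset_index()
px.bar(active_by_state, x='Province_State', y='Active',
       title='State wise active COVID-19 cases in USA', text='Active').show()
# Question 8: Province/State wise Confirmed cases in USA
confirmed_by_state = us_data.groupby('Province_State')['Confirmed'].sum().reset_index()
px.bar(confirmed_by_state, x='Province_State', y='Confirmed',
       title='State wise confirmed COVID-19 cases in USA', text='Confirmed').show()
# Question 9: Worldwide Confirmed cases over time. The JHU CSSE time-series URL below is an
# assumption and is not referenced anywhere in the original notebook.
ts_url = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
          'csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
ts = pd.read_csv(ts_url)
date_cols = ts.columns[4:]            # date columns start after Province/State, Country/Region, Lat, Long
worldwide = ts[date_cols].sum()       # total confirmed cases per date
worldwide.index = pd.to_datetime(worldwide.index)
px.line(x=worldwide.index, y=worldwide.values,
        labels={'x': 'Date', 'y': 'Confirmed cases'},
        title='Worldwide confirmed COVID-19 cases over time').show()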
###Output
_____no_output_____ |
notebooks/01-3_news-data-exploration.ipynb | ###Markdown
 Data Exploration News DataThis notebook explores data sources for news data (especially short news tickers) for information mining using NLP and sentiment analysis.
###Code
import feedparser
import pandas as pd
d = feedparser.parse('https://www.ft.com/business-education?format=rss')
feed = pd.DataFrame(d['entries'])
meta = d['feed']
feed.head()
d = feedparser.parse('http://rss.cnn.com/rss/money_news_economy.rss')
feed = pd.DataFrame(d['entries'])
meta = d['feed']
feed.head()
###Output
_____no_output_____
###Markdown
 Data ParsingThis part outlines the ETL pipeline that prepares the feed data for sentiment analysis; a first sketch is included in the code cell below.
###Code
# TODO
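# Sketch (added as an illustration, not the original pipeline): a minimal ETL pass over the
# feeds explored above. Assumes the `feedparser` and `pandas` imports from the first cell;
# the RSS fields 'title', 'summary', 'published' and 'link' are common but not guaranteed,
# hence the reindex/fillna below.
feeds = [
    'https://www.ft.com/business-education?format=rss',
    'http://rss.cnn.com/rss/money_news_economy.rss',
]
def extract(url):
    # Extract: download and parse one RSS feed into a DataFrame
    entries = pd.DataFrame(feedparser.parse(url)['entries'])
    return entries.reindex(columns=['title', 'summary', 'published', 'link'])
def transform(df):
    # Transform: build one short text per entry and normalise whitespace
    df = df.dropna(subset=['title']).copy()
    df['text'] = (df['title'].fillna('') + '. ' + df['summary'].fillna('')).str.strip()
    return df[['text', 'published', 'link']]
# Load: keep everything in a single DataFrame ready for sentiment scoring
corpus = pd.concat([transform(extract(url)) for url in feeds], ignore_index=True)
corpus.head()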
###Output
_____no_output_____ |
babble.ipynb | ###Markdown
 BabblingLet's write a stupid LSTM RNN which learns to generate text based on a corpus fed to it. Keras has a lovely API so we'll use that, backed up by the brunt of TensorFlow.
###Code
import math
import pandas as pd
import numpy as np
import nltk
from numpy.random import choice
from keras.layers import *
from keras.models import Sequential
###Output
_____no_output_____
###Markdown
Let's load in a big lump of text for the LSTM to read
###Code
book_path = './data/hp_philosophers_stone.txt'
with open(book_path) as f:
text = f.read().lower()
print('corpus length:', len(text))
###Output
corpus length: 439400
###Markdown
Then get a set of the unique characters in the text, and call it our vocabulary. Even in normal text the vocabulary is likely to be quite large - 26 upper case characters, 26 lower case characters, and loads of punctuation
###Code
characters = sorted(list(set(text)))
vocab_size = len(characters)
vocab_size
###Output
_____no_output_____
###Markdown
To make our data computationally interpretable, we should make some kind of index mapping each character to a unique numeric id. We can then represent our full book text as a list of character indicies. In other words, the output will be a long sequence of numbers which spell out the book.
###Code
character_to_index = dict((c, i) for i, c in enumerate(characters))
index_to_character = dict((i, c) for i, c in enumerate(characters))
text_as_indicies = [character_to_index[c] for c in text]
###Output
_____no_output_____
###Markdown
Now we can start splitting that massively long series of numbers into a load of training sequences. We'll use a sequence length of 40, because, having tested this with a bunch of lengths, 40 is a nice round number that seems to work well. It also gives us enough context to start picking up on grammar and sentence cadence without being excessive.
###Code
sequence_length = 40
num_sequences = len(text) - sequence_length + 1
sequences = [text_as_indicies[i : i + sequence_length]
for i in range(num_sequences)]
next_characters = [text_as_indicies[i + 1 : i + sequence_length + 1]
for i in range(num_sequences)]
len(sequences)
###Output
_____no_output_____
###Markdown
 We already built the next-character sequences above; now we just stack the sequences and their next-characters into numpy arrays (dropping the last two, since the final next-character sequence comes up one character short).
###Code
sequences = np.concatenate([[np.array(seq)] for seq in sequences[:-2]])
next_characters = np.concatenate([[np.array(char)] for char in next_characters[:-2]])
###Output
_____no_output_____
###Markdown
Here's an example of the two things we'll be using to train the network
###Code
print('sequence:\n' + str(sequences[0]) + '\n')
print('next characters:\n' + str(next_characters[0]))
###Output
sequence:
[44 32 29 1 26 39 49 1 47 32 39 1 36 33 46 29 28 52 52 37 42 10 1 25 38
28 1 37 42 43 10 1 28 45 42 43 36 29 49 8]
next characters:
[32 29 1 26 39 49 1 47 32 39 1 36 33 46 29 28 52 52 37 42 10 1 25 38 28
1 37 42 43 10 1 28 45 42 43 36 29 49 8 1]
###Markdown
 Building the modelWe're going to use a pretty generic model structure: - embedding- lstm- dropout- lstm - dropout- dense (time distributed)- softmaxWe're also going to use the ADAM optimizer because it's the best and most clever mashup of things (AdaGrad and RMSProp) ever, sparse categorical cross entropy as our loss function, and the mean absolute error as our metric.
###Code
model = Sequential([Embedding(vocab_size,
24,
input_length=sequence_length),
LSTM(512,
input_dim=24,
return_sequences=True,
dropout_U=0.2,
dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
LSTM(512,
return_sequences=True,
dropout_U=0.2,
dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
TimeDistributed(Dense(vocab_size)),
Activation('softmax')])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['mae'])
###Output
_____no_output_____
###Markdown
Training the model
###Code
model.optimizer.lr = 0.001
model.fit(sequences,
np.expand_dims(next_characters,-1),
batch_size=64,
nb_epoch=1)
###Output
/home/ubuntu/anaconda2/envs/py36/lib/python3.6/site-packages/keras/models.py:851: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.
warnings.warn('The `nb_epoch` argument in `fit` '
###Markdown
Now that we've trained the model and optimised all of the weights in the network, we can save them to an `.h5` file.
###Code
model.save_weights('models/weights.h5')
###Output
_____no_output_____
###Markdown
 Reloading a pretrained modelIf you've built the model and trained it elsewhere, you can reload it by calling `.load_weights()` with the path to the `.h5` file, as follows
###Code
model.load_weights('models/weights.h5')
###Output
_____no_output_____
###Markdown
Babbling
###Code
def babble(seed_string=' '*40, output_length=500):
'''
Say a lot of stupid stuff based on all of the input text
that we trained the model on
Parameters
----------
seed_string : string (optional)
The story that you want your idiot network to be
inspired by
default = 40 spaces
output_length : int (optional)
how long do you want the idiot network to talk for
default = 500
Returns
-------
seed_string : string
the original seed string with 500 characters of new
stuff attached to the end of it
'''
for i in range(output_length):
x = np.array([character_to_index[c] for c in seed_string[-40:]])[np.newaxis,:]
preds = model.predict(x, verbose=0)[0][-1]
preds = preds / np.sum(preds)
next_character = choice(characters, p=preds)
seed_string += next_character
print(seed_string)
babble()
###Output
in it and walked up by trouble glumping on his tricks as harry left harry's broom and back. "let's everyone else had to go bit to look at each other. "just then," harry. but harry, too, ron, and ron fruffled so back for us," ron sighed, as they telling himself against the stone. "then the armchairs wouldn't over his mouth. the flash of the days to give us them id it, just a wafd." this is it must be sort. i dungeon had left professor mcgonagall noticing making the first i've got to said. "co
|
notebooks/02_FastText.ipynb | ###Markdown
 fastText and spectral clustering fastText is a method, created by Facebook, for generating vector representations of words (embeddings). It is similar to Word2Vec but has the distinctive feature of modeling not only whole words but also substrings (subwords). Thanks to this, and to the notion of "composition", it can build the representation of a word it never saw during training from the combination of the representations of its parts/substrings. In this notebook we will train fastText representations from a Spanish corpus and then cluster them using the spectral clustering technique. 1. Vector representation of words with fastText We will install fastText natively using the github repository and the command line:
###Code
!wget https://github.com/facebookresearch/fastText/archive/v0.9.1.zip
!unzip v0.9.1.zip
%cd fastText-0.9.1
!make
###Output
--2019-08-19 16:34:13-- https://github.com/facebookresearch/fastText/archive/v0.9.1.zip
Resolving github.com (github.com)... 52.74.223.119
Connecting to github.com (github.com)|52.74.223.119|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://codeload.github.com/facebookresearch/fastText/zip/v0.9.1 [following]
--2019-08-19 16:34:19-- https://codeload.github.com/facebookresearch/fastText/zip/v0.9.1
Resolving codeload.github.com (codeload.github.com)... 13.229.189.0
Connecting to codeload.github.com (codeload.github.com)|13.229.189.0|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [application/zip]
Saving to: ‘v0.9.1.zip’
v0.9.1.zip [ <=> ] 4.13M 2.18MB/s in 1.9s
2019-08-19 16:34:22 (2.18 MB/s) - ‘v0.9.1.zip’ saved [4327207]
Archive: v0.9.1.zip
b5b7d307274ce00ef52198fbc692ed3bd11d9856
creating: fastText-0.9.1/
creating: fastText-0.9.1/.circleci/
inflating: fastText-0.9.1/.circleci/cmake_test.sh
inflating: fastText-0.9.1/.circleci/config.yml
inflating: fastText-0.9.1/.circleci/gcc_test.sh
inflating: fastText-0.9.1/.circleci/pip_test.sh
inflating: fastText-0.9.1/.circleci/pull_data.sh
inflating: fastText-0.9.1/.circleci/python_test.sh
inflating: fastText-0.9.1/.circleci/run_locally.sh
inflating: fastText-0.9.1/.circleci/setup_circleimg.sh
inflating: fastText-0.9.1/.circleci/setup_debian.sh
inflating: fastText-0.9.1/.gitignore
inflating: fastText-0.9.1/CMakeLists.txt
inflating: fastText-0.9.1/CODE_OF_CONDUCT.md
inflating: fastText-0.9.1/CONTRIBUTING.md
inflating: fastText-0.9.1/LICENSE
inflating: fastText-0.9.1/MANIFEST.in
inflating: fastText-0.9.1/Makefile
inflating: fastText-0.9.1/README.md
creating: fastText-0.9.1/alignment/
inflating: fastText-0.9.1/alignment/README.md
inflating: fastText-0.9.1/alignment/align.py
inflating: fastText-0.9.1/alignment/eval.py
inflating: fastText-0.9.1/alignment/example.sh
inflating: fastText-0.9.1/alignment/unsup_align.py
inflating: fastText-0.9.1/alignment/utils.py
inflating: fastText-0.9.1/classification-example.sh
inflating: fastText-0.9.1/classification-results.sh
creating: fastText-0.9.1/crawl/
inflating: fastText-0.9.1/crawl/README.md
inflating: fastText-0.9.1/crawl/dedup.cc
inflating: fastText-0.9.1/crawl/download_crawl.sh
inflating: fastText-0.9.1/crawl/filter_dedup.sh
inflating: fastText-0.9.1/crawl/filter_utf8.cc
inflating: fastText-0.9.1/crawl/process_wet_file.sh
creating: fastText-0.9.1/docs/
inflating: fastText-0.9.1/docs/aligned-vectors.md
inflating: fastText-0.9.1/docs/api.md
inflating: fastText-0.9.1/docs/cheatsheet.md
inflating: fastText-0.9.1/docs/crawl-vectors.md
inflating: fastText-0.9.1/docs/dataset.md
inflating: fastText-0.9.1/docs/english-vectors.md
inflating: fastText-0.9.1/docs/faqs.md
inflating: fastText-0.9.1/docs/language-identification.md
inflating: fastText-0.9.1/docs/options.md
inflating: fastText-0.9.1/docs/pretrained-vectors.md
inflating: fastText-0.9.1/docs/python-module.md
inflating: fastText-0.9.1/docs/references.md
inflating: fastText-0.9.1/docs/supervised-models.md
inflating: fastText-0.9.1/docs/supervised-tutorial.md
inflating: fastText-0.9.1/docs/support.md
inflating: fastText-0.9.1/docs/unsupervised-tutorials.md
inflating: fastText-0.9.1/eval.py
inflating: fastText-0.9.1/get-wikimedia.sh
creating: fastText-0.9.1/python/
inflating: fastText-0.9.1/python/README.md
inflating: fastText-0.9.1/python/README.rst
creating: fastText-0.9.1/python/benchmarks/
inflating: fastText-0.9.1/python/benchmarks/README.rst
inflating: fastText-0.9.1/python/benchmarks/get_word_vector.py
creating: fastText-0.9.1/python/doc/
creating: fastText-0.9.1/python/doc/examples/
inflating: fastText-0.9.1/python/doc/examples/FastTextEmbeddingBag.py
inflating: fastText-0.9.1/python/doc/examples/bin_to_vec.py
inflating: fastText-0.9.1/python/doc/examples/compute_accuracy.py
inflating: fastText-0.9.1/python/doc/examples/get_vocab.py
inflating: fastText-0.9.1/python/doc/examples/train_supervised.py
inflating: fastText-0.9.1/python/doc/examples/train_unsupervised.py
creating: fastText-0.9.1/python/fasttext_module/
creating: fastText-0.9.1/python/fasttext_module/fasttext/
inflating: fastText-0.9.1/python/fasttext_module/fasttext/FastText.py
inflating: fastText-0.9.1/python/fasttext_module/fasttext/__init__.py
creating: fastText-0.9.1/python/fasttext_module/fasttext/pybind/
inflating: fastText-0.9.1/python/fasttext_module/fasttext/pybind/fasttext_pybind.cc
creating: fastText-0.9.1/python/fasttext_module/fasttext/tests/
inflating: fastText-0.9.1/python/fasttext_module/fasttext/tests/__init__.py
inflating: fastText-0.9.1/python/fasttext_module/fasttext/tests/test_configurations.py
inflating: fastText-0.9.1/python/fasttext_module/fasttext/tests/test_script.py
creating: fastText-0.9.1/python/fasttext_module/fasttext/util/
inflating: fastText-0.9.1/python/fasttext_module/fasttext/util/__init__.py
inflating: fastText-0.9.1/python/fasttext_module/fasttext/util/util.py
inflating: fastText-0.9.1/quantization-example.sh
inflating: fastText-0.9.1/runtests.py
creating: fastText-0.9.1/scripts/
creating: fastText-0.9.1/scripts/kbcompletion/
inflating: fastText-0.9.1/scripts/kbcompletion/README.md
inflating: fastText-0.9.1/scripts/kbcompletion/data.sh
inflating: fastText-0.9.1/scripts/kbcompletion/eval.cpp
inflating: fastText-0.9.1/scripts/kbcompletion/fb15k.sh
inflating: fastText-0.9.1/scripts/kbcompletion/fb15k237.sh
inflating: fastText-0.9.1/scripts/kbcompletion/svo.sh
inflating: fastText-0.9.1/scripts/kbcompletion/wn18.sh
creating: fastText-0.9.1/scripts/quantization/
inflating: fastText-0.9.1/scripts/quantization/quantization-results.sh
extracting: fastText-0.9.1/setup.cfg
inflating: fastText-0.9.1/setup.py
creating: fastText-0.9.1/src/
inflating: fastText-0.9.1/src/args.cc
inflating: fastText-0.9.1/src/args.h
inflating: fastText-0.9.1/src/densematrix.cc
inflating: fastText-0.9.1/src/densematrix.h
inflating: fastText-0.9.1/src/dictionary.cc
inflating: fastText-0.9.1/src/dictionary.h
inflating: fastText-0.9.1/src/fasttext.cc
inflating: fastText-0.9.1/src/fasttext.h
inflating: fastText-0.9.1/src/loss.cc
inflating: fastText-0.9.1/src/loss.h
inflating: fastText-0.9.1/src/main.cc
inflating: fastText-0.9.1/src/matrix.cc
inflating: fastText-0.9.1/src/matrix.h
inflating: fastText-0.9.1/src/meter.cc
inflating: fastText-0.9.1/src/meter.h
inflating: fastText-0.9.1/src/model.cc
inflating: fastText-0.9.1/src/model.h
inflating: fastText-0.9.1/src/productquantizer.cc
inflating: fastText-0.9.1/src/productquantizer.h
inflating: fastText-0.9.1/src/quantmatrix.cc
inflating: fastText-0.9.1/src/quantmatrix.h
inflating: fastText-0.9.1/src/real.h
inflating: fastText-0.9.1/src/utils.cc
inflating: fastText-0.9.1/src/utils.h
inflating: fastText-0.9.1/src/vector.cc
inflating: fastText-0.9.1/src/vector.h
creating: fastText-0.9.1/tests/
inflating: fastText-0.9.1/tests/fetch_test_data.sh
creating: fastText-0.9.1/website/
inflating: fastText-0.9.1/website/README.md
creating: fastText-0.9.1/website/blog/
inflating: fastText-0.9.1/website/blog/2016-08-18-blog-post.md
inflating: fastText-0.9.1/website/blog/2017-05-02-blog-post.md
inflating: fastText-0.9.1/website/blog/2017-10-02-blog-post.md
inflating: fastText-0.9.1/website/blog/2019-06-25-blog-post.md
creating: fastText-0.9.1/website/core/
inflating: fastText-0.9.1/website/core/Footer.js
inflating: fastText-0.9.1/website/package.json
creating: fastText-0.9.1/website/pages/
creating: fastText-0.9.1/website/pages/en/
inflating: fastText-0.9.1/website/pages/en/index.js
inflating: fastText-0.9.1/website/sidebars.json
inflating: fastText-0.9.1/website/siteConfig.js
creating: fastText-0.9.1/website/static/
creating: fastText-0.9.1/website/static/docs/
creating: fastText-0.9.1/website/static/docs/en/
creating: fastText-0.9.1/website/static/docs/en/html/
extracting: fastText-0.9.1/website/static/docs/en/html/.classfasttext_1_1QMatrix-members.html.i4eKqy
inflating: fastText-0.9.1/website/static/docs/en/html/annotated.html
inflating: fastText-0.9.1/website/static/docs/en/html/annotated_dup.js
inflating: fastText-0.9.1/website/static/docs/en/html/args_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/args_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/args_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/args_8h_source.html
extracting: fastText-0.9.1/website/static/docs/en/html/bc_s.png
inflating: fastText-0.9.1/website/static/docs/en/html/bdwn.png
inflating: fastText-0.9.1/website/static/docs/en/html/classes.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Args-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Args.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Args.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Dictionary-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Dictionary.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Dictionary.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1FastText-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1FastText.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1FastText.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Matrix-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Matrix.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Matrix.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Model-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Model.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Model.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1ProductQuantizer-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1ProductQuantizer.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1ProductQuantizer.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1QMatrix-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1QMatrix.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1QMatrix.js
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Vector-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Vector.html
inflating: fastText-0.9.1/website/static/docs/en/html/classfasttext_1_1Vector.js
inflating: fastText-0.9.1/website/static/docs/en/html/closed.png
inflating: fastText-0.9.1/website/static/docs/en/html/dictionary_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/dictionary_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/dictionary_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/dictionary_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/dir_68267d1309a1af8e8297ef4c3efbcdba.html
inflating: fastText-0.9.1/website/static/docs/en/html/dir_68267d1309a1af8e8297ef4c3efbcdba.js
extracting: fastText-0.9.1/website/static/docs/en/html/doc.png
inflating: fastText-0.9.1/website/static/docs/en/html/doxygen.css
extracting: fastText-0.9.1/website/static/docs/en/html/doxygen.png
inflating: fastText-0.9.1/website/static/docs/en/html/dynsections.js
inflating: fastText-0.9.1/website/static/docs/en/html/fasttext_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/fasttext_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/fasttext_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/fasttext_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/favicon.png
inflating: fastText-0.9.1/website/static/docs/en/html/files.html
inflating: fastText-0.9.1/website/static/docs/en/html/files.js
extracting: fastText-0.9.1/website/static/docs/en/html/folderclosed.png
extracting: fastText-0.9.1/website/static/docs/en/html/folderopen.png
inflating: fastText-0.9.1/website/static/docs/en/html/functions.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_0x7e.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_b.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_c.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_d.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_dup.js
inflating: fastText-0.9.1/website/static/docs/en/html/functions_e.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_f.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_func.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_g.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_h.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_i.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_k.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_l.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_m.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_n.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_o.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_p.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_q.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_r.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_s.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_t.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_u.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_v.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_vars.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_w.html
inflating: fastText-0.9.1/website/static/docs/en/html/functions_z.html
inflating: fastText-0.9.1/website/static/docs/en/html/globals.html
inflating: fastText-0.9.1/website/static/docs/en/html/globals_defs.html
inflating: fastText-0.9.1/website/static/docs/en/html/globals_func.html
inflating: fastText-0.9.1/website/static/docs/en/html/index.html
inflating: fastText-0.9.1/website/static/docs/en/html/jquery.js
inflating: fastText-0.9.1/website/static/docs/en/html/main_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/main_8cc.js
inflating: fastText-0.9.1/website/static/docs/en/html/matrix_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/matrix_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/matrix_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/menu.js
inflating: fastText-0.9.1/website/static/docs/en/html/menudata.js
inflating: fastText-0.9.1/website/static/docs/en/html/model_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/model_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/model_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/model_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespacefasttext.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespacefasttext.js
inflating: fastText-0.9.1/website/static/docs/en/html/namespacefasttext_1_1utils.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespacemembers.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespacemembers_enum.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespacemembers_func.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespacemembers_type.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespaces.html
inflating: fastText-0.9.1/website/static/docs/en/html/namespaces.js
extracting: fastText-0.9.1/website/static/docs/en/html/nav_f.png
inflating: fastText-0.9.1/website/static/docs/en/html/nav_g.png
inflating: fastText-0.9.1/website/static/docs/en/html/nav_h.png
inflating: fastText-0.9.1/website/static/docs/en/html/navtree.css
inflating: fastText-0.9.1/website/static/docs/en/html/navtree.js
inflating: fastText-0.9.1/website/static/docs/en/html/navtreedata.js
inflating: fastText-0.9.1/website/static/docs/en/html/navtreeindex0.js
inflating: fastText-0.9.1/website/static/docs/en/html/navtreeindex1.js
inflating: fastText-0.9.1/website/static/docs/en/html/open.png
inflating: fastText-0.9.1/website/static/docs/en/html/productquantizer_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/productquantizer_8cc.js
inflating: fastText-0.9.1/website/static/docs/en/html/productquantizer_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/productquantizer_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/qmatrix_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/qmatrix_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/qmatrix_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/real_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/real_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/real_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/resize.js
creating: fastText-0.9.1/website/static/docs/en/html/search/
extracting: fastText-0.9.1/website/static/docs/en/html/search/.files_7.html.StRRNc
extracting: fastText-0.9.1/website/static/docs/en/html/search/.variables_a.html.1MGQ27
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_10.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_10.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_11.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_11.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_12.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_12.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_13.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_13.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_14.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_14.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_15.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_15.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_16.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_16.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_17.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_17.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_4.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_4.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_5.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_5.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_6.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_6.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_7.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_7.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_8.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_8.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_9.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_9.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_a.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_a.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_b.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_b.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_c.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_c.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_d.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_d.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_e.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_e.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_f.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/all_f.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_4.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_4.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_5.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_5.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_6.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_6.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_7.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_7.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_8.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/classes_8.js
extracting: fastText-0.9.1/website/static/docs/en/html/search/close.png
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_4.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_4.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_5.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_5.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_4.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_4.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_5.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_5.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_6.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_6.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_7.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_7.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_8.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/files_8.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_10.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_10.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_11.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_11.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_12.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_12.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_13.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_13.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_14.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_14.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_15.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_15.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_16.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_16.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_17.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_17.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_4.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_4.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_5.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_5.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_6.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_6.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_7.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_7.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_8.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_8.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_9.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_9.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_a.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_a.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_b.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_b.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_c.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_c.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_d.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_d.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_e.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_e.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_f.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_f.js
extracting: fastText-0.9.1/website/static/docs/en/html/search/mag_sel.png
inflating: fastText-0.9.1/website/static/docs/en/html/search/namespaces_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/namespaces_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/nomatches.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/search.css
inflating: fastText-0.9.1/website/static/docs/en/html/search/search.js
extracting: fastText-0.9.1/website/static/docs/en/html/search/search_l.png
inflating: fastText-0.9.1/website/static/docs/en/html/search/search_m.png
extracting: fastText-0.9.1/website/static/docs/en/html/search/search_r.png
inflating: fastText-0.9.1/website/static/docs/en/html/search/searchdata.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_0.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_0.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_1.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_1.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_10.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_10.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_11.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_11.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_12.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_12.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_13.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_13.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_2.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_2.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_3.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_3.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_4.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_4.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_5.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_5.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_6.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_6.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_7.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_7.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_8.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_8.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_9.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_9.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_a.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_a.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_b.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_b.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_c.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_c.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_d.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_d.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_e.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_e.js
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_f.html
inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_f.js
inflating: fastText-0.9.1/website/static/docs/en/html/splitbar.png
inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1Node-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1Node.html
inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1Node.js
inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1entry-members.html
inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1entry.html
inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1entry.js
extracting: fastText-0.9.1/website/static/docs/en/html/sync_off.png
extracting: fastText-0.9.1/website/static/docs/en/html/sync_on.png
extracting: fastText-0.9.1/website/static/docs/en/html/tab_a.png
extracting: fastText-0.9.1/website/static/docs/en/html/tab_b.png
extracting: fastText-0.9.1/website/static/docs/en/html/tab_h.png
extracting: fastText-0.9.1/website/static/docs/en/html/tab_s.png
inflating: fastText-0.9.1/website/static/docs/en/html/tabs.css
inflating: fastText-0.9.1/website/static/docs/en/html/utils_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/utils_8cc.js
inflating: fastText-0.9.1/website/static/docs/en/html/utils_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/utils_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/utils_8h_source.html
inflating: fastText-0.9.1/website/static/docs/en/html/vector_8cc.html
inflating: fastText-0.9.1/website/static/docs/en/html/vector_8cc.js
inflating: fastText-0.9.1/website/static/docs/en/html/vector_8h.html
inflating: fastText-0.9.1/website/static/docs/en/html/vector_8h.js
inflating: fastText-0.9.1/website/static/docs/en/html/vector_8h_source.html
inflating: fastText-0.9.1/website/static/fasttext.css
creating: fastText-0.9.1/website/static/img/
creating: fastText-0.9.1/website/static/img/authors/
inflating: fastText-0.9.1/website/static/img/authors/armand_joulin.jpg
inflating: fastText-0.9.1/website/static/img/authors/christian_puhrsch.png
inflating: fastText-0.9.1/website/static/img/authors/edouard_grave.jpeg
inflating: fastText-0.9.1/website/static/img/authors/piotr_bojanowski.jpg
inflating: fastText-0.9.1/website/static/img/authors/tomas_mikolov.jpg
creating: fastText-0.9.1/website/static/img/blog/
inflating: fastText-0.9.1/website/static/img/blog/2016-08-18-blog-post-img1.png
inflating: fastText-0.9.1/website/static/img/blog/2016-08-18-blog-post-img2.png
inflating: fastText-0.9.1/website/static/img/blog/2017-05-02-blog-post-img1.jpg
inflating: fastText-0.9.1/website/static/img/blog/2017-05-02-blog-post-img2.jpg
inflating: fastText-0.9.1/website/static/img/blog/2017-10-02-blog-post-img1.png
inflating: fastText-0.9.1/website/static/img/cbo_vs_skipgram.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-api.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-bg-web.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-color-square.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-color-web.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-faq.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-tutorial.png
inflating: fastText-0.9.1/website/static/img/fasttext-icon-white-web.png
inflating: fastText-0.9.1/website/static/img/fasttext-logo-color-web.png
inflating: fastText-0.9.1/website/static/img/fasttext-logo-white-web.png
inflating: fastText-0.9.1/website/static/img/logo-color.png
inflating: fastText-0.9.1/website/static/img/model-black.png
inflating: fastText-0.9.1/website/static/img/model-blue.png
inflating: fastText-0.9.1/website/static/img/model-red.png
inflating: fastText-0.9.1/website/static/img/ogimage.png
inflating: fastText-0.9.1/website/static/img/oss_logo.png
inflating: fastText-0.9.1/wikifil.pl
inflating: fastText-0.9.1/word-vector-example.sh
/content/fastText-0.9.1
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/args.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/matrix.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/dictionary.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/loss.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/productquantizer.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/densematrix.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/quantmatrix.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/vector.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/model.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/utils.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/meter.cc
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/fasttext.cc
[01m[Ksrc/fasttext.cc:[m[K In member function ‘[01m[Kvoid fasttext::FastText::quantize(const fasttext::Args&)[m[K’:
[01m[Ksrc/fasttext.cc:323:45:[m[K [01;35m[Kwarning: [m[K‘[01m[Kstd::vector<int> fasttext::FastText::selectEmbeddings(int32_t) const[m[K’ is deprecated: selectEmbeddings is being deprecated. [[01;35m[K-Wdeprecated-declarations[m[K]
auto idx = selectEmbeddings(qargs.cutoff[01;35m[K)[m[K;
[01;35m[K^[m[K
[01m[Ksrc/fasttext.cc:293:22:[m[K [01;36m[Knote: [m[Kdeclared here
std::vector<int32_t> [01;36m[KFastText[m[K::selectEmbeddings(int32_t cutoff) const {
[01;36m[K^~~~~~~~[m[K
[01m[Ksrc/fasttext.cc:[m[K In member function ‘[01m[Kvoid fasttext::FastText::lazyComputeWordVectors()[m[K’:
[01m[Ksrc/fasttext.cc:551:40:[m[K [01;35m[Kwarning: [m[K‘[01m[Kvoid fasttext::FastText::precomputeWordVectors(fasttext::DenseMatrix&)[m[K’ is deprecated: precomputeWordVectors is being deprecated. [[01;35m[K-Wdeprecated-declarations[m[K]
precomputeWordVectors(*wordVectors_[01;35m[K)[m[K;
[01;35m[K^[m[K
[01m[Ksrc/fasttext.cc:534:6:[m[K [01;36m[Knote: [m[Kdeclared here
void [01;36m[KFastText[m[K::precomputeWordVectors(DenseMatrix& wordVectors) {
[01;36m[K^~~~~~~~[m[K
c++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG args.o matrix.o dictionary.o loss.o productquantizer.o densematrix.o quantmatrix.o vector.o model.o utils.o meter.o fasttext.o src/main.cc -o fasttext
###Markdown
 Once fastText is installed, we can start training vector-representation models from a corpus. We mount the Drive file system:
###Code
# Mount the Google Drive contents
from google.colab import drive
drive.mount('/content/drive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code
Enter your authorization code:
··········
Mounted at /content/drive
###Markdown
 To train the fastText method we will use the combination of two small corpora: a) the Corpus del Español Mexicano Contemporáneo (CEMC); and b) the Spanish side of the Nahuatl-Spanish parallel corpus Axolotl. Training is done from the command line, and various hyperparameters can be specified ([see the documentation](https://fasttext.cc/docs/en/unsupervised-tutorial.html)). Two required parameters are the input file and the output file that the training will generate.
###Code
# Two files are generated: cemc.bin (the model) and cemc.vec (the latter is literally a text file with one vector per line)
# They can be downloaded with the file browser (left tab): fastText-0.9.1/result
!mkdir result
!./fasttext cbow -input /content/drive/My\ Drive/Curso_RIIAA/data/cemcytodo.txt -output result/cemc
###Output
/bin/bash: ./fasttext: No such file or directory
###Markdown
 Once the model has been generated, we can use these vectors in different ways, for example returning the closest/most similar words for a given word:
###Code
!./fasttext nn result/cemc.bin
###Output
Query word? mole
atole 0.975429
metate 0.946135
tomate 0.943589
ole 0.939852
échale 0.937425
huele 0.935428
hule 0.934699
híjole 0.932914
late 0.930711
jitomate 0.92915
Query word? ^C
###Markdown
 2. Loading the vectors in Python We can load the models obtained from fastText and manipulate them directly from Python, using the gensim package.
###Code
# Install / Upgrade Gensim
!pip install --upgrade gensim
###Output
Collecting gensim
[?25l Downloading https://files.pythonhosted.org/packages/40/3d/89b27573f56abcd1b8c9598b240f53c45a3c79aa0924a24588e99716043b/gensim-3.8.0-cp36-cp36m-manylinux1_x86_64.whl (24.2MB)
[K |████████████████████████████████| 24.2MB 42.0MB/s
[?25hRequirement already satisfied, skipping upgrade: six>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.12.0)
Requirement already satisfied, skipping upgrade: scipy>=0.18.1 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.3.1)
Requirement already satisfied, skipping upgrade: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.16.4)
Requirement already satisfied, skipping upgrade: smart-open>=1.7.0 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.8.4)
Requirement already satisfied, skipping upgrade: boto3 in /usr/local/lib/python3.6/dist-packages (from smart-open>=1.7.0->gensim) (1.9.205)
Requirement already satisfied, skipping upgrade: requests in /usr/local/lib/python3.6/dist-packages (from smart-open>=1.7.0->gensim) (2.21.0)
Requirement already satisfied, skipping upgrade: boto>=2.32 in /usr/local/lib/python3.6/dist-packages (from smart-open>=1.7.0->gensim) (2.49.0)
Requirement already satisfied, skipping upgrade: s3transfer<0.3.0,>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from boto3->smart-open>=1.7.0->gensim) (0.2.1)
Requirement already satisfied, skipping upgrade: botocore<1.13.0,>=1.12.205 in /usr/local/lib/python3.6/dist-packages (from boto3->smart-open>=1.7.0->gensim) (1.12.205)
Requirement already satisfied, skipping upgrade: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3->smart-open>=1.7.0->gensim) (0.9.4)
Requirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.7.0->gensim) (2.8)
Requirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.7.0->gensim) (1.24.3)
Requirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.7.0->gensim) (3.0.4)
Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.7.0->gensim) (2019.6.16)
Requirement already satisfied, skipping upgrade: python-dateutil<3.0.0,>=2.1; python_version >= "2.7" in /usr/local/lib/python3.6/dist-packages (from botocore<1.13.0,>=1.12.205->boto3->smart-open>=1.7.0->gensim) (2.5.3)
Requirement already satisfied, skipping upgrade: docutils<0.15,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.13.0,>=1.12.205->boto3->smart-open>=1.7.0->gensim) (0.14)
Installing collected packages: gensim
Found existing installation: gensim 3.6.0
Uninstalling gensim-3.6.0:
Successfully uninstalled gensim-3.6.0
Successfully installed gensim-3.8.0
###Markdown
 We load the model:
###Code
from gensim.models.wrappers import FastText
# Load the pre-trained model
model = FastText.load_fasttext_format('result/cemc')
###Output
_____no_output_____
###Markdown
 Once the model is loaded we can play with the vectors directly from Python. Here we can also look up similar words, or measure the similarity between two words:
###Code
# Find the words most similar to a query word
print(model.most_similar('azteca'))
print(model.most_similar('mexicano'))
# Similarity between two words
print(model.similarity('mexico', 'país'))
###Output
[('zapoteca', 0.9707117676734924), ('barbilla', 0.9341251850128174), ('polla', 0.9327656626701355), ('chilena', 0.9314919114112854), ('azuela', 0.9312282800674438), ('orilla', 0.9310535192489624), ('bonilla', 0.9304966330528259), ('zorrilla', 0.9283484220504761), ('borbolla', 0.9271571040153503), ('chueca', 0.9267408847808838)]
[('americano', 0.9683598279953003), ('mexico', 0.9493384957313538), ('norteamericano', 0.9465785622596741), ('africano', 0.933608889579773), ('republicano', 0.9239773750305176), ('mexicanismo', 0.9033154249191284), ('latinoamericano', 0.9010395407676697), ('universitario', 0.9005328416824341), ('organizador', 0.8923201560974121), ('italiano', 0.8891371488571167)]
0.3114357
###Markdown
One of the advantages of fastText is that, besides providing vectors for the words seen in the training vocabulary, it can build vector representations for words that were not in that vocabulary (*Out-Of-Vocabulary words, OOV*). It does this by composing subword vectors.
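To make the idea concrete, here is a small illustrative sketch (not fastText's actual internals) of the boundary-marked character n-grams whose vectors a subword model can combine to represent an unseen word:

```python
def char_ngrams(word, n_min=3, n_max=6):
    # fastText-style word boundary markers
    token = '<' + word + '>'
    return [token[i:i + n]
            for n in range(n_min, n_max + 1)
            for i in range(len(token) - n + 1)]

# e.g. '<co', 'com', 'omp', ... for the OOV word used below
print(char_ngrams('computadorsota')[:8])
```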
###Code
# Word that is inside the training vocabulary
existent_word = "computadora"
print(existent_word in model.wv.vocab)
# Get the vector for this word
vector_computadora = model.wv[existent_word]
# Out-of-vocabulary word
oov_word = "computadorsota"
print(oov_word in model.wv.vocab)
# Get the vector for the OOV word
vector_oov = model.wv[oov_word]
# Similarity between the two
print(model.similarity(existent_word, oov_word))
###Output
True
False
0.96310055
###Markdown
3. Grouping with spectral clusteringOnce we have the Fasttext vectors, we can apply the spectral clustering algorithm to group and visualize the resulting data.
###Code
# Required packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import pandas as pd
import networkx as nx
from scipy.linalg import eig
from operator import itemgetter
###Output
_____no_output_____
###Markdown
Since Fasttext can represent OOV words, we can give it any arbitrary word list and obtain vectors that represent those words.
###Code
# List of words to work with
vocab_words = ['amo','amas','amamos','ama','aman','come','como','comemos','comen','toco','tocas','tocan','tocamos','gato','minino','gatito','gatos','mininos',
'flor','flores','mininito','computadora','computadoras']
###Output
_____no_output_____
###Markdown
We obtain the vectors representing the words above and store them in an array.
###Code
# Vocabulary size
N = len(vocab_words)
# Matrix of N x number of embedding dimensions
X = np.zeros((N,100))
# Fill the matrix with the word vectors
for i,w in enumerate(vocab_words):
X[i] = model.wv[w]
print(X.shape)
###Output
(23, 100)
###Markdown
We can visualize these data with the following function:
###Code
# Plotting helper
def plot_words(Z,ids,color='blue'):
# Reduce to two dimensions with PCA
Z = PCA(n_components=2).fit_transform(Z)
r=0
# Scatter-plot the two dimensions
plt.scatter(Z[:,0],Z[:,1], marker='o', c=color)
for label,x,y in zip(ids, Z[:,0], Z[:,1]):
# Add the word labels
plt.annotate(label, xy=(x,y), xytext=(-1,1), textcoords='offset points', ha='center', va='bottom')
r+=1
# Plot the data
plot_words(X, vocab_words)
plt.show()
###Output
_____no_output_____
###Markdown
Starting from the matrix of vectors, we apply the spectral clustering algorithm. For this, we build an adjacency matrix that represents a graph over the words. We use the following kernel function to quantify the similarity between two vectors $$k(u,v) = \frac{1}{||u-v||+1}$$We also use a k-nearest-neighbor graph formulation, in which only the k nearest neighbors of each node are considered adjacent.
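Written as a stand-alone helper (a small sketch; the cell below inlines the same computation), the kernel is simply:

```python
def kernel(u, v):
    # k(u, v) = 1 / (||u - v|| + 1): equals 1 for identical vectors and decays toward 0 with distance
    return 1.0 / (np.linalg.norm(u - v) + 1.0)
```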
###Code
# Adjacency matrix
A = np.zeros((N,N))
for k,u in enumerate(X):
# Neighbors of u, with their distances
candidates_for_k = {}
for j,v in enumerate(X):
# Euclidean distance
dist = np.linalg.norm(u-v)
# Store the distance to this neighbor
candidates_for_k[j] = dist
# Sort the neighbors by distance
neighbors = sorted(candidates_for_k.items(), key=itemgetter(1))
# The first neighbor is always the node itself (||u-u||=0)
neighbors.pop(0)
# Keep only the single nearest neighbor (1-nearest graph)
nn = neighbors[:1]
for neigh, weight in nn:
# Fill in the adjacency matrix symmetrically
A[k,neigh] = 1./(weight+1)
A[neigh,k] = 1./(weight+1)
print(A.shape)
# Check that the matrix is symmetric
print((A == A.T).all())
###Output
True
###Markdown
We can display the adjacency matrix as a table:
###Code
# Table view with pandas
df = pd.DataFrame(A, index=vocab_words, columns=vocab_words)
print(df.to_string())
###Output
amo amas amamos ama aman come como comemos comen toco tocas tocan tocamos gato minino gatito gatos mininos flor flores mininito computadora computadoras
amo 0.000000 0.000000 0.000000 0.143727 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.161970 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
amas 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.16533 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.139763 0.000000 0.000000 0.000000
amamos 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.322724 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
ama 0.143727 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.139422 0.000000 0.000000 0.000000 0.000000
aman 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.207049 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
come 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.165684 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
como 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.197728 0.000000
comemos 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.275011 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
comen 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.205471 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
toco 0.000000 0.000000 0.000000 0.000000 0.000000 0.165684 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.228812 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
tocas 0.000000 0.165330 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
tocan 0.000000 0.000000 0.000000 0.000000 0.207049 0.000000 0.000000 0.000000 0.205471 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
tocamos 0.000000 0.000000 0.322724 0.000000 0.000000 0.000000 0.000000 0.275011 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
gato 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.282277 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
minino 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.278849 0.000000 0.000000
gatito 0.161970 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.228812 0.00000 0.000000 0.000000 0.282277 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.289433 0.000000 0.000000
gatos 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.209625 0.000000 0.000000 0.000000 0.000000 0.000000
mininos 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.209625 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
flor 0.000000 0.000000 0.000000 0.139422 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
flores 0.000000 0.139763 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
mininito 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.278849 0.289433 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
computadora 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.197728 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.233841
computadoras 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.233841 0.000000
###Markdown
We can also visualize the graph as a network:
###Code
# Index the node labels
edges = {i:dat for i,dat in enumerate(vocab_words)}
# Draw the graph with NetworkX
nx.draw_networkx(nx.from_numpy_array(A), with_labels=True, labels=edges, font_size=8)
###Output
_____no_output_____
###Markdown
Now that we have the adjacency matrix, we can compute the Laplacian matrix and obtain its spectral decomposition.
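For intuition, here is a minimal sketch of the unnormalized Laplacian $L = D - A$ on a toy 3-node path graph (assuming numpy as imported above):

```python
A_toy = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
D_toy = np.diag(A_toy.sum(axis=0))  # degree matrix
L_toy = D_toy - A_toy               # unnormalized Laplacian; every row sums to zero
print(L_toy)
```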
###Code
# Compute the (unnormalized) Laplacian matrix
L = np.diag(A.sum(0)) - A
# Compute the eigenvalues and eigenvectors of L
eig_vals, eig_vecs = eig(L)
# Sort by eigenvalue; the eigenvectors are the *columns* of eig_vecs, so pair values with columns
values = sorted(zip(eig_vals.real, eig_vecs.T), key=itemgetter(0))
# Unpack the eigenvalues and eigenvectors
vals, vecs = zip(*values)
# Build a matrix whose rows are the eigenvectors
matrix = np.array(vecs)
# Plot the eigenvalues
plt.plot(np.array(vals),'o')
plt.show()
###Output
_____no_output_____
###Markdown
We then take the k eigenvectors that best represent our data, and visualize the result with the plotting function defined above.
###Code
# Take a slice of the Laplacian eigenvectors as the new representation
X_hat = matrix[5:17].T.real
# Plot the data
print(X_hat.shape)
plot_words(X_hat,vocab_words)
plt.show()
###Output
(23, 12)
###Markdown
Finally, we apply k-means to discover groups in the projected data:
###Code
# Number of centroids
centroids=10
# Run k-means
kmeans = KMeans(n_clusters=centroids).fit(X_hat)
# Get the cluster assignments
pred_labels = kmeans.predict(X_hat)
# Plot the clusters, colored by cluster label
plot_words(X_hat, vocab_words, color=pred_labels)
plt.show()
###Output
_____no_output_____
###Markdown
--- Optional alternativeFasttext can also be applied to a slice of a real English Wikipedia corpus (training takes a long time):
###Code
!mkdir data
!wget -c http://mattmahoney.net/dc/enwik9.zip -P data
!unzip data/enwik9.zip -d data
!perl wikifil.pl data/enwik9 > data/fil9
!mkdir result
!./fasttext cbow -input data/fil9 -output result/fil9
###Output
mkdir: cannot create directory ‘result’: File exists
Read 0M words
Number of words: 0
Number of labels: 0
terminate called after throwing an instance of 'std::invalid_argument'
what(): Empty vocabulary. Try a smaller -minCount value.
|
CNN_PyTorch/4-2. Classify FashionMNIST, solution 1.ipynb | ###Markdown
CNN for Classification---In this and the next notebook, we define **and train** a CNN to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist). We are providing two solutions to show you how different network structures and training strategies can affect the performance and accuracy of a CNN. This first solution will be a simple CNN with two convolutional layers. Please note that this is just one possible solution out of many! Load the [data](https://pytorch.org/docs/stable/torchvision/datasets.html)In this cell, we load in both **training and test** datasets from the FashionMNIST class.
###Code
# our basic libraries
import torch
import torchvision
# data loading and transforming
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torchvision import transforms
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors for input into a CNN
## Define a transform to read the data in as a tensor
data_transform = transforms.ToTensor()
# choose the training and test datasets
train_data = FashionMNIST(root='./data', train=True,
download=True, transform=data_transform)
test_data = FashionMNIST(root='./data', train=False,
download=True, transform=data_transform)
# Print out some stats about the training and test data
print('Train data, number of images: ', len(train_data))
print('Test data, number of images: ', len(test_data))
# prepare data loaders, set the batch_size
## TODO: you can try changing the batch_size to be larger or smaller
## when you get to training your network, see how batch_size affects the loss
batch_size = 20
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
# specify the image classes
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
###Markdown
Visualize some training dataThis cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title(classes[labels[idx]])
###Output
_____no_output_____
###Markdown
Define the network architectureThe various layers that make up any neural network are documented, [here](https://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll use a simple series of layers:* Convolutional layers* Maxpooling layers* Fully-connected (linear) layersYou are also encouraged to look at adding [dropout layers](https://pytorch.org/docs/stable/nn.htmldropout) to avoid overfitting this data.---To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in.Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network. Define the Layers in ` __init__`As a reminder, a conv/pool layer may be defined like this (in `__init__`):``` 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernelself.conv1 = nn.Conv2d(1, 32, 3) maxpool that uses a square window of kernel_size=2, stride=2self.pool = nn.MaxPool2d(2, 2) ``` Refer to Layers in `forward`Then referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied:```x = self.pool(F.relu(self.conv1(x)))```You must place any layers with trainable weights, such as convolutional layers, in the `__init__` function and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, may appear *only* in the `forward` function. In practice, you'll often see conv/pool layers defined in `__init__` and activations defined in `forward`. Convolutional layerThe first convolution layer has been defined for you, it takes in a 1 channel (grayscale) image and outputs 10 feature maps as output, after convolving the image with 3x3 filters. FlatteningRecall that to move from the output of a convolutional/pooling layer to a linear layer, you must first flatten your extracted features into a vector. If you've used the deep learning library, Keras, you may have seen this done by `Flatten()`, and in PyTorch you can flatten an input `x` with `x = x.view(x.size(0), -1)`. TODO: Define the rest of the layersIt will be up to you to define the other layers in this network; we have some recommendations, but you may change the architecture and parameters as you see fit.Recommendations/tips:* Use at least two convolutional layers* Your output must be a linear layer with 10 outputs (for the 10 classes of clothing)* Use a dropout layer to avoid overfitting A note on output sizeFor any convolutional layer, the output feature maps will have the specified depth (a depth of 10 for 10 filters in a convolutional layer) and the dimensions of the produced feature maps (width/height) can be computed as the _input image_ width/height, W, minus the filter size, F, divided by the stride, S, all + 1. The equation looks like: `output_dim = (W-F)/S + 1`, for an assumed padding size of 0. You can find a derivation of this formula, [here](http://cs231n.github.io/convolutional-networks/conv).For a pool layer with a size 2 and stride 2, the output dimension will be reduced by a factor of 2. Read the comments in the code below to see the output size for each layer.
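As a quick sanity check on the sizes quoted in the comments below, the formula can be coded up directly (a small helper for illustration, not part of the solution network):

```python
def conv_output_dim(W, F, S=1, P=0):
    """Feature-map width/height for a conv layer: (W - F + 2P) / S + 1 (P = padding, 0 here)."""
    return (W - F + 2 * P) // S + 1

print(conv_output_dim(28, 3))  # 26: first conv layer on a 28x28 image
print(conv_output_dim(13, 3))  # 11: second conv layer, after 2x2 pooling halves 26 -> 13
```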
###Code
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel (grayscale), 10 output channels/feature maps
# 3x3 square convolution kernel
## output size = (W-F)/S +1 = (28-3)/1 +1 = 26
# the output Tensor for one image, will have the dimensions: (10, 26, 26)
# after one pool layer, this becomes (10, 13, 13)
self.conv1 = nn.Conv2d(1, 10, 3)
# maxpool layer
# pool with kernel_size=2, stride=2
self.pool = nn.MaxPool2d(2, 2)
# second conv layer: 10 inputs, 20 outputs, 3x3 conv
## output size = (W-F)/S +1 = (13-3)/1 +1 = 11
# the output tensor will have dimensions: (20, 11, 11)
# after another pool layer this becomes (20, 5, 5); 5.5 is rounded down
self.conv2 = nn.Conv2d(10, 20, 3)
# 20 outputs * the 5*5 filtered/pooled map size
# 10 output channels (for the 10 classes)
self.fc1 = nn.Linear(20*5*5, 10)
# define the feedforward behavior
def forward(self, x):
# two conv/relu + pool layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
# prep for linear layer
# flatten the inputs into a vector
x = x.view(x.size(0), -1)
# one linear layer
x = F.relu(self.fc1(x))
# a softmax layer to convert the 10 outputs into a distribution of class scores
x = F.log_softmax(x, dim=1)
# final output
return x
# instantiate and print your Net
net = Net()
print(net)
###Output
Net(
(conv1): Conv2d(1, 10, kernel_size=(3, 3), stride=(1, 1))
(pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv2): Conv2d(10, 20, kernel_size=(3, 3), stride=(1, 1))
(fc1): Linear(in_features=500, out_features=10, bias=True)
)
###Markdown
TODO: Specify the loss function and optimizerLearn more about [loss functions](https://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizers](https://pytorch.org/docs/stable/optim.html) in the online documentation.Note that for a classification problem like this, one typically uses cross entropy loss, which can be defined in code like: `criterion = nn.CrossEntropyLoss()`; cross entropy loss combines `softmax` and `NLL loss` so, alternatively (as in this example), you may see NLL Loss being used when the output of our Net is a distribution of class scores. PyTorch also includes some standard stochastic optimizers like stochastic gradient descent and Adam. You're encouraged to try different optimizers and see how your model responds to these choices as it trains.
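For reference, a hypothetical variation you could experiment with (not the settings used in this solution) is cross entropy loss with the Adam optimizer; note that `nn.CrossEntropyLoss` expects raw class scores, so you would also drop the `log_softmax` call from the network's `forward`:

```python
import torch.nn as nn
import torch.optim as optim

criterion_alt = nn.CrossEntropyLoss()                   # softmax + NLL in one step, on raw scores
optimizer_alt = optim.Adam(net.parameters(), lr=0.001)  # adaptive learning-rate optimizer
```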
###Code
import torch.optim as optim
## TODO: specify loss function
# cross entropy loss combines softmax and nn.NLLLoss() in one single class.
criterion = nn.NLLLoss()
## TODO: specify optimizer
# stochastic gradient descent with a small learning rate
optimizer = optim.SGD(net.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
A note on accuracyIt's interesting to look at the accuracy of your network **before and after** training. This way you can really see that your network has learned something. In the next cell, let's see what the accuracy of an untrained network is (we expect it to be around 10% which is the same accuracy as just guessing for all 10 classes).
###Code
# Calculate accuracy before training
correct = 0
total = 0
# Iterate through test dataset
for images, labels in test_loader:
# forward pass to get outputs
# the outputs are a series of class scores
outputs = net(images)
# get the predicted class from the maximum value in the output-list of class scores
_, predicted = torch.max(outputs.data, 1)
# count up total number of correct labels
# for which the predicted and true labels are equal
total += labels.size(0)
correct += (predicted == labels).sum()
# calculate the accuracy
# to convert `correct` from a Tensor into a scalar, use .item()
accuracy = 100.0 * correct.item() / total
# print it out!
print('Accuracy before training: ', accuracy)
###Output
Accuracy before training: 10.52
###Markdown
Train the NetworkBelow, we've defined a `train` function that takes in a number of epochs to train for. * The number of epochs is how many times a network will cycle through the entire training dataset. * Inside the epoch loop, we loop over the training dataset in batches; recording the loss every 1000 batches.Here are the steps that this training function performs as it iterates over the training dataset:1. Zeroes the gradients to prepare for a forward pass2. Passes the input through the network (forward pass)3. Computes the loss (how far the predicted classes are from the correct labels)4. Propagates gradients back into the network’s parameters (backward pass)5. Updates the weights (parameter update)6. Prints out the calculated loss
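Distilled to its core (the full `train` function below adds loss tracking and printing), a single training step is just:

```python
optimizer.zero_grad()              # 1. zero the gradients
outputs = net(inputs)              # 2. forward pass
loss = criterion(outputs, labels)  # 3. compute the loss
loss.backward()                    # 4. backward pass (compute gradients)
optimizer.step()                   # 5. update the weights
```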
###Code
def train(n_epochs):
loss_over_time = [] # to track the loss as the network trains
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
for batch_i, data in enumerate(train_loader):
# get the input images and their corresponding labels
inputs, labels = data
# zero the parameter (weight) gradients
optimizer.zero_grad()
# forward pass to get outputs
outputs = net(inputs)
# calculate the loss
loss = criterion(outputs, labels)
# backward pass to calculate the parameter gradients
loss.backward()
# update the parameters
optimizer.step()
# print loss statistics
# to convert loss into a scalar and add it to running_loss, we use .item()
running_loss += loss.item()
if batch_i % 1000 == 999: # print every 1000 batches
avg_loss = running_loss/1000
# record and print the avg loss over the 1000 batches
loss_over_time.append(avg_loss)
print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, avg_loss))
running_loss = 0.0
print('Finished Training')
return loss_over_time
# define the number of epochs to train for
n_epochs = 30 # start small to see if your model works, initially
# call train and record the loss over time
training_loss = train(n_epochs)
###Output
Epoch: 1, Batch: 1000, Avg. Loss: 2.2868175230026244
Epoch: 1, Batch: 2000, Avg. Loss: 2.2556393740177154
Epoch: 1, Batch: 3000, Avg. Loss: 2.205124769091606
Epoch: 2, Batch: 1000, Avg. Loss: 2.1203471163511276
Epoch: 2, Batch: 2000, Avg. Loss: 2.0477432513237
Epoch: 2, Batch: 3000, Avg. Loss: 1.9815359983444214
Epoch: 3, Batch: 1000, Avg. Loss: 1.8996226536035539
Epoch: 3, Batch: 2000, Avg. Loss: 1.8147404186725618
Epoch: 3, Batch: 3000, Avg. Loss: 1.7321927509307862
Epoch: 4, Batch: 1000, Avg. Loss: 1.578281832098961
Epoch: 4, Batch: 2000, Avg. Loss: 1.5266655530929565
Epoch: 4, Batch: 3000, Avg. Loss: 1.4980273706316949
Epoch: 5, Batch: 1000, Avg. Loss: 1.4726707084178925
Epoch: 5, Batch: 2000, Avg. Loss: 1.4684425346851349
Epoch: 5, Batch: 3000, Avg. Loss: 1.4494574863910674
Epoch: 6, Batch: 1000, Avg. Loss: 1.4456084757447243
Epoch: 6, Batch: 2000, Avg. Loss: 1.4305420234799384
Epoch: 6, Batch: 3000, Avg. Loss: 1.4167207903862
Epoch: 7, Batch: 1000, Avg. Loss: 1.407468405842781
Epoch: 7, Batch: 2000, Avg. Loss: 1.4014562340378762
Epoch: 7, Batch: 3000, Avg. Loss: 1.4169175248742103
Epoch: 8, Batch: 1000, Avg. Loss: 1.4001869242191314
Epoch: 8, Batch: 2000, Avg. Loss: 1.3928540123105049
Epoch: 8, Batch: 3000, Avg. Loss: 1.3802106212377547
Epoch: 9, Batch: 1000, Avg. Loss: 1.3772892249822617
Epoch: 9, Batch: 2000, Avg. Loss: 1.3825336514115334
Epoch: 9, Batch: 3000, Avg. Loss: 1.3705663481354713
Epoch: 10, Batch: 1000, Avg. Loss: 1.3694153184890747
Epoch: 10, Batch: 2000, Avg. Loss: 1.3810064570605756
Epoch: 10, Batch: 3000, Avg. Loss: 1.341630794942379
Epoch: 11, Batch: 1000, Avg. Loss: 1.3677116389870643
Epoch: 11, Batch: 2000, Avg. Loss: 1.3436600825190543
Epoch: 11, Batch: 3000, Avg. Loss: 1.350110428392887
Epoch: 12, Batch: 1000, Avg. Loss: 1.3445810881853104
Epoch: 12, Batch: 2000, Avg. Loss: 1.3499431834816933
Epoch: 12, Batch: 3000, Avg. Loss: 1.3393242843151092
Epoch: 13, Batch: 1000, Avg. Loss: 1.3306362637281417
Epoch: 13, Batch: 2000, Avg. Loss: 1.330379969418049
Epoch: 13, Batch: 3000, Avg. Loss: 1.3463139534592627
Epoch: 14, Batch: 1000, Avg. Loss: 1.3359011572301387
Epoch: 14, Batch: 2000, Avg. Loss: 1.3317513466477393
Epoch: 14, Batch: 3000, Avg. Loss: 1.3167364555597305
Epoch: 15, Batch: 1000, Avg. Loss: 1.3136654596626758
Epoch: 15, Batch: 2000, Avg. Loss: 1.3182315327227117
###Markdown
Visualizing the lossA good indication of how much your network is learning as it trains is the loss over time. In this example, we printed and recorded the average loss for every 1000 batches and for each epoch. Let's plot it and see how the loss decreases (or doesn't) over time.In this case, you can see that it takes a little while before the big initial loss decrease, and the loss flattens out over time.
###Code
# visualize the loss as the network trained
plt.plot(training_loss)
plt.xlabel('1000\'s of batches')
plt.ylabel('loss')
plt.ylim(0, 2.5) # consistent scale
plt.show()
###Output
_____no_output_____
###Markdown
Test the Trained NetworkOnce you are satisfied with how the loss of your model has decreased, there is one last step: test!You must test your trained model on a previously unseen dataset to see if it generalizes well and can accurately classify this new dataset. For FashionMNIST, which contains many pre-processed training images, a good model should reach **greater than 85% accuracy** on this test dataset. If you are not reaching this value, try training for a larger number of epochs, tweaking your hyperparameters, or adding/subtracting layers from your CNN.
###Code
# initialize tensor and lists to monitor test loss and accuracy
test_loss = torch.zeros(1)
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
# set the module to evaluation mode
net.eval()
for batch_i, data in enumerate(test_loader):
# get the input images and their corresponding labels
inputs, labels = data
# forward pass to get outputs
outputs = net(inputs)
# calculate the loss
loss = criterion(outputs, labels)
# update average test loss
test_loss = test_loss + ((torch.ones(1) / (batch_i + 1)) * (loss.data - test_loss))
# get the predicted class from the maximum value in the output-list of class scores
_, predicted = torch.max(outputs.data, 1)
# compare predictions to true label
# this creates a `correct` Tensor that holds the number of correctly classified images in a batch
correct = np.squeeze(predicted.eq(labels.data.view_as(predicted)))
# calculate test accuracy for *each* object class
# we get the scalar value of correct items for a class, by calling `correct[i].item()`
for i in range(batch_size):
label = labels.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
print('Test Loss: {:.6f}\n'.format(test_loss.numpy()[0]))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
classes[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.784023
Test Accuracy of T-shirt/top: 92% (925/1000)
Test Accuracy of Trouser: 96% (967/1000)
Test Accuracy of Pullover: 0% ( 0/1000)
Test Accuracy of Dress: 87% (873/1000)
Test Accuracy of Coat: 91% (911/1000)
Test Accuracy of Sandal: 94% (945/1000)
Test Accuracy of Shirt: 0% ( 0/1000)
Test Accuracy of Sneaker: 93% (935/1000)
Test Accuracy of Bag: 96% (967/1000)
Test Accuracy of Ankle boot: 93% (938/1000)
Test Accuracy (Overall): 74% (7461/10000)
###Markdown
Visualize sample test resultsFormat: predicted class (true class)
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get predictions
preds = np.squeeze(net(images).data.max(1, keepdim=True)[1].numpy())
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Question: What are some weaknesses of your model? (And how might you improve these in future iterations?) **Answer**: This model performs well on everything but shirts and pullovers (0% accuracy); it looks like it incorrectly classifies most of those as a coat, which has a similar overall shape. Because it performs well on everything but these two classes, I suspect this model is overfitting certain classes at the cost of generalization. I suspect that this accuracy could be improved by adding some dropout layers to avoid overfitting.
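As a minimal sketch of that idea (same architecture as above, with one added dropout layer; the dropout probability is an assumption to tune):

```python
import torch.nn as nn
import torch.nn.functional as F

class NetWithDropout(nn.Module):
    def __init__(self):
        super(NetWithDropout, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.drop = nn.Dropout(p=0.4)    # randomly zero 40% of the flattened features during training
        self.fc1 = nn.Linear(20*5*5, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)
        x = self.drop(x)                 # dropout before the final linear layer
        x = F.relu(self.fc1(x))
        return F.log_softmax(x, dim=1)
```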
###Code
# Saving the model
model_dir = 'saved_models/'
model_name = 'fashion_net_simple.pt'
# after training, save your model parameters in the dir 'saved_models'
# when you're ready, un-comment the line below
torch.save(net.state_dict(), model_dir+model_name)
###Output
_____no_output_____ |
pytorch_forecasting_hello.ipynb | ###Markdown
###Code
!pip install pytorch-forecasting
###Output
_____no_output_____
###Markdown
NOT quite done yet
###Code
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer
from pytorch_forecasting.data.examples import get_stallion_data
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
!pip install microprediction
###Output
_____no_output_____
###Markdown
Hello world exampleSee https://www.microprediction.com/blog/popular-timeseries-packages for more packages
###Code
from microprediction import MicroReader
mr = MicroReader()
YS = mr.get_lagged_values(name='emojitracker-twitter-face_with_medical_mask.json')[:200]
import pytorch_lightning as pl
import pandas as pd
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer
def run(ys):
""" Apply model to univariate time series
# Not really the strength of this library but why not?
:param ys: Vector of observations
:param theta: Unused (at present) parameters or hyper-params
:return: Vector of predictions
"""
burnin = len(ys)
data = pd.DataFrame(columns=['y'],data=ys[:burnin])
data["time_idx"] = list(range(burnin))
data["group_id"] = ["same" for _ in range(burnin)]
max_prediction_length = 1
max_encoder_length = 24
training_cutoff = data["time_idx"].max() - max_prediction_length
training = TimeSeriesDataSet(
data[lambda x: x.time_idx <= training_cutoff],
time_idx="time_idx",
target="y",
min_encoder_length=max_encoder_length // 2, # keep encoder length long (as it is in the validation set)
max_encoder_length=max_encoder_length,
min_prediction_length=1,
max_prediction_length=max_prediction_length,
add_relative_time_idx=True,
add_target_scales=True,
add_encoder_length=True,
group_ids=["group_id"]
)
validation = TimeSeriesDataSet.from_dataset(training, data, predict=True, stop_randomization=True)
batch_size = 128 # set this between 32 to 128
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)
pl.seed_everything(42)
trainer = pl.Trainer(
gpus=0,
# clipping gradients is a hyperparameter and important to prevent divergance
# of the gradient for recurrent neural networks
gradient_clip_val=0.1,
)
tft = TemporalFusionTransformer.from_dataset(
training,
# not meaningful for finding the learning rate but otherwise very important
learning_rate=0.03,
hidden_size=16, # most important hyperparameter apart from learning rate
# number of attention heads. Set to up to 4 for large datasets
attention_head_size=1,
dropout=0.1, # between 0.1 and 0.3 are good values
hidden_continuous_size=8, # set to <= hidden_size
output_size=7, # 7 quantiles by default
loss=QuantileLoss(),
# reduce learning rate if no improvement in validation loss after x epochs
reduce_on_plateau_patience=4,
)
print(f"Number of parameters in network: {tft.size() / 1e3:.1f}k")
# find optimal learning rate
res = trainer.tuner.lr_find(
tft,
train_dataloader=train_dataloader,
val_dataloaders=val_dataloader,
max_lr=10.0,
min_lr=1e-6,
)
# configure network and trainer
early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min")
lr_logger = LearningRateMonitor() # log the learning rate
trainer = pl.Trainer(
max_epochs=30,
gpus=0,
weights_summary="top",
gradient_clip_val=0.1,
limit_train_batches=30,  # comment in for training, running validation every 30 batches
# fast_dev_run=True,  # comment in to check that the network or dataset has no serious bugs
callbacks=[lr_logger, early_stop_callback],
logger=TensorBoardLogger("lightning_logs"),
)
tft = TemporalFusionTransformer.from_dataset(
training,
learning_rate=0.03,
hidden_size=16,
attention_head_size=1,
dropout=0.1,
hidden_continuous_size=8,
output_size=7, # 7 quantiles by default
loss=QuantileLoss(),
log_interval=10,
# logging interval: 10 logs every 10 batches (adjust, or disable, when running the learning rate finder)
reduce_on_plateau_patience=4,
)
print(f"Number of parameters in network: {tft.size() / 1e3:.1f}k")
trainer.fit(
tft,
train_dataloader=train_dataloader,
val_dataloaders=val_dataloader,
)
# load the best model according to the validation loss
# (given that we use early stopping, this is not necessarily the last epoch)
best_model_path = trainer.checkpoint_callback.best_model_path
best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
# ----------------------------------------------------
# Out of sample ... (this is wrong ... need to fix it)
encoder_data = data[lambda x: x.time_idx > x.time_idx.max() - max_encoder_length]
last_data = data[lambda x: x.time_idx == x.time_idx.max()]
decoder_data = pd.concat(
[last_data for i in range(1, max_prediction_length + 1)],
ignore_index=True,
)
num_decoder = len(decoder_data)
decoder_data["time_idx"] = list(range(num_decoder))
# combine encoder and decoder data
new_prediction_data = pd.concat([encoder_data, decoder_data], ignore_index=True)
new_raw_predictions, new_x = best_tft.predict(new_prediction_data, mode="raw", return_x=True)
return ys
XS = run(YS)
len(XS)
import matplotlib.pyplot as plt
plt.plot(YS[125:150],'*b')
plt.plot(XS[125:150],'g')
plt.legend(['data','prior'])
###Output
_____no_output_____ |
Download Expression Matrix for Scanpy/Download SmartSeq2 Expression Matrix as Input to Scanpy.ipynb | ###Markdown
Download SmartSeq2 Expression Matrix as an Input to Scanpy Suppose I want to get a SmartSeq2 expression matrix that I can analyze using scanpy. How can I go about finding something like this using the DSS API?
###Code
import hca.dss
client = hca.dss.DSSClient()
###Output
_____no_output_____
###Markdown
Well, first things first: We're going to need to search for a `.results` file. This file should contain what we need to put together an expression matrix.Here's our ElasticSearch query.
###Code
query = {
"query": {
"bool": {
"must": [
{
"wildcard": {
"manifest.files.name": {
# We need a *.results file...
"value": "*.results"
}
}
},
{
"range": {
"manifest.version": {
# ...and preferably not too old, either.
"gte": "2018-07-12T100000.000000Z"
}
}
}
]
}
}
}
###Output
_____no_output_____
###Markdown
This query looks for bundles that satisfy a *bool*ean condition consisting of two checks, both of which *must* be true. The *wildcard* check looks at the `manifest.files.name` field and returns true if the name of a file listed in a bundle manifest ends with '.results'. The second check, *range*, returns true if the bundle's `manifest.version` has a value greater than or equal to 7/12/18. In short, __this query will find bundles that contain a `.results` file and are newer than July 12, 2018.__ Okay, now let's execute the search. Since the `files` section of the bundle is pretty long, I'll only print the portion containing a results file. If you want, you can always print the entire bundle to get a better picture of where the file is located.
###Code
import json
# Print part of a recent analysis bundle with a results file
bundles = client.post_search(es_query=query, replica='aws', output_format='raw')
the_first_bundle = bundles['results'][0]
bundle_files = the_first_bundle['metadata']['manifest']['files']
for f in bundle_files:
if f['name'].endswith('.results'):
results_file_uuid, results_file_version = f['uuid'], f['version']
print(results_file_uuid, results_file_version)
###Output
ec727ae1-d47a-47a3-8c8e-b42d7a0e8cf4 2019-05-18T173116.989870Z
###Markdown
Okay! It looks like we've found a file uuid we can use. Let's retrieve that file and save it locally.
###Code
results = client.get_file(replica='aws', uuid=results_file_uuid, version=results_file_version)
open('matrix.results', 'w').write(results.decode("utf-8"))
###Output
_____no_output_____
###Markdown
Here's what our file, `matrix.results`, looks like. I've truncated the output so it doesn't take up too much room.
###Code
print(open('matrix.results', 'r').read()[:852])
###Output
transcript_id gene_id length effective_length expected_count TPM FPKM IsoPct posterior_mean_count posterior_standard_deviation_of_count pme_TPM pme_FPKM IsoPct_from_pme_TPM
ENST00000373020.8 ENSG00000000003.14 2206 2016.44 0.00 0.00 0.00 0.00 0.00 0.00 0.12 0.21 9.99
ENST00000494424.1 ENSG00000000003.14 820 630.44 0.00 0.00 0.00 0.00 0.00 0.00 0.38 0.68 31.95
ENST00000496771.5 ENSG00000000003.14 1025 835.44 0.00 0.00 0.00 0.00 0.00 0.00 0.28 0.51 24.11
ENST00000612152.4 ENSG00000000003.14 3796 3606.44 0.00 0.00 0.00 0.00 0.00 0.00 0.07 0.12 5.59
ENST00000614008.4 ENSG00000000003.14 900 710.44 0.00 0.00 0.00 0.00 0.00 0.00 0.33 0.60 28.36
ENST00000373031.4 ENSG00000000005.5 1339 1149.44 0.00 0.00 0.00 0.00 0.00 0.00 0.21 0.37 23.47
ENST00000485971.1 ENSG00000000005.5 542 352.44 0.00 0.00 0.00 0.00 0.00 0.00 0.67 1.22 76.53
ENST00000371582.8
###Markdown
For our matrix, however, we might only want _some_ of these values. In my case, suppose I only want the `gene_id` and `TPM` values. We can extract these values easily using Python's `csv` module.
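If you prefer pandas to the csv module, an equivalent approach (a sketch, assuming the same file names) would be:

```python
import pandas as pd

df = pd.read_csv('matrix.results', sep='\t')
df[['gene_id', 'TPM']].to_csv('matrix.tsv', sep='\t', index=False, header=False)
```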
###Code
import csv
# Take the data we want out of the results file and store it into a tsv file
with open('matrix.results', 'r') as infile, open('matrix.tsv', 'w', newline='') as outfile:
reader = csv.DictReader(infile, delimiter='\t')
writer = csv.DictWriter(outfile, fieldnames=['gene_id', 'TPM'], delimiter='\t')
for row in reader:
writer.writerow({'gene_id': row['gene_id'], 'TPM': row['TPM']})
###Output
_____no_output_____
###Markdown
Our new file, `matrix.tsv`, looks something like this:
###Code
print(open('matrix.tsv', 'r').read()[:214])
###Output
ENSG00000000003.14 0.00
ENSG00000000003.14 0.00
ENSG00000000003.14 0.00
ENSG00000000003.14 0.00
ENSG00000000003.14 0.00
ENSG00000000005.5 0.00
ENSG00000000005.5 0.00
ENSG00000000419.12 0.00
ENSG00000000419.12 0.00
###Markdown
Now that we have a file containing what we want, we can transpose it and read it into scanpy.
###Code
import scanpy.api as sc
adata = sc.read_csv(filename='matrix.tsv', delimiter='\t').transpose()
###Output
Observation names are not unique. To make them unique, call `.obs_names_make_unique`.
Variable names are not unique. To make them unique, call `.var_names_make_unique`.
###Markdown
But how do we know that everything worked? Let's print our AnnData object (truncating the output again).
###Code
print(adata)
for i in range(0, 153):
print('{:<6}'.format('{:.1f}'.format(adata.X[i])), end='' if (i + 1) % 17 != 0 else '\n' )
###Output
AnnData object with n_obs × n_vars = 1 × 200468
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.5 0.0 0.0 0.0 0.0 0.0 167.1
0.0 3.2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
###Markdown
And just to make it easier to see the relevant data in our matrix...
###Code
for i in range(len(adata.X)-1):
if adata.X[i] != 0:
print(adata.X[i], end=' ')
###Output
6.53 167.05 3.25 2.52 3.04 1.0 7.05 6.08 12.84 10.88 3.11 10.06 8.9 12.26 9.91 1.38 11.71 69.56 89.88 1.45 0.53 10.06 64.21 0.85 39.42 21.5 2.81 21.39 2.46 1.37 0.57 41.7 1.05 6.21 3.64 5.22 11.23 48.57 0.22 6.72 8.65 2.24 0.57 0.86 1.27 1.17 2.48 0.91 9.6 168.78 7.53 434.51 279.39 3.62 1.31 2.09 1.75 130.58 1.08 0.66 7.79 5.89 13.77 5.16 3.22 2.04 0.4 1.67 3.34 2.07 2.22 0.39 11.2 4.03 0.65 2.68 1.93 2.8 36.79 1.35 30.25 2.69 11.25 0.51 22.75 1.92 0.86 0.52 49.27 634.34 650.86 149.68 0.67 24.23 1.02 5.38 10.23 0.38 4.57 11.69 23.09 4.82 4.11 8.05 0.63 10.47 5.71 1.76 1.7 1.3 8.48 0.8 11.97 2.45 11.12 1.39 3.75 31.57 0.46 3.08 2.78 1.91 9.58 11.81 14.14 548.93 1033.31 2.02 11.71 1.48 6.2 1.87 2.8 1.62 2.94 4.99 37.99 0.92 8.03 3.22 65.9 6.83 47.61 2.16 0.44 15.74 6.07 1.0 0.42 2.15 0.88 2.23 5.08 1.11 4.08 4.42 2.3 1.11 19.06 0.62 4.89 1.44 9.8 1.55 1.64 6.58 0.81 1.24 2.85 2.9 17.77 25.15 0.49 4.73 0.04 0.78 2.04 2.26 2.43 0.44 1.34 24.36 2.94 6.62 6.65 21.71 20.38 4.72 0.83 10.44 5.54 90.53 1.73 1.36 3.57 3.13 0.35 1.03 97.68 2.18 11.05 2.25 12.1 3.28 0.85 131.76 2.93 3.52 1.42 7.53 16.79 11.52 0.85 2.19 12.8 1.43 4.72 14.51 2.35 30.29 3.62 10.23 4.01 1.81 124.66 0.78 2.98 0.36 2.94 1.71 1.8 1.44 2.19 59.13 182.93 23.63 0.43 1.3 98.35 11.2 2.77 2.47 41.67 2.33 1.04 9.27 5.48 1.41 6.83 48.74 7.78 2.14 2.78 0.73 22.63 0.53 8.67 0.46 4.95 3.39 22.52 1.47 76.77 3.88 2.22 3.0 13.9 2.89 12.11 2.49 7.58 11.23 5.36 0.74 0.93 5.24 8.51 5.14 0.53 1.03 2.83 3.24 6.0 2.22 0.96 7.05 1.54 12.07 2.25 0.5 50.93 1.14 5.8 1.85 0.84 7.31 3.26 22.54 1.47 10.48 11.36 7.27 3.64 2.95 1.59 32.68 1.0 28.59 0.47 1.19 0.68 27.62 2.6 33.37 2.1 0.95 1.06 23.9 10.48 2.57 0.71 2.23 6.01 1.18 4.22 6.14 1.22 0.83 2.09 1.8 0.7 3.95 7.84 3.79 4.68 6.24 1.64 1.92 8.9 4.25 1.98 4.91 2.32 55.25 3.11 0.92 11.21 0.87 1.08 1.49 1.18 16.12 24.25 326.34 8.45 12.06 2.22 24.31 10.8 7.74 1.02 25.23 296.06 4.24 1.04 3.36 1.34 2.18 4.24 3.67 0.8 14.28 4.82 8.14 2.9 88.49 1.11 6.28 8.02 3.32 22.09 1.9 1.29 9.45 1.04 0.76 2.39 11.39 2.79 1.26 7.54 0.63 12.68 1.5 1.82 1.04 3.27 0.85 2.07 1.14 27.14 8.23 3.48 1.8 23.09 0.56 2.68 1.4 5.36 4.24 0.63 1.38 8.29 1.01 0.43 1.58 0.51 0.96 2.44 5.32 13.1 4.12 4.75 2.68 6.47 5.72 1.5 11.24 0.89 10.41 13.64 4.66 0.64 36.29 22.94 2.24 4.72 6.46 1.44 19.38 2.98 3.17 7.23 137.56 10.51 1.02 47.85 195.22 2.37 106.51 39.35 0.48 2.25 8.32 4.72 9.92 1.54 3.14 5.68 61.23 0.86 0.52 0.95 17.92 4.37 0.77 5.71 15.94 0.81 1.19 1.14 9.21 3.22 60.52 0.66 10.42 0.61 2.31 1.53 31.52 14.38 2.06 2.39 4.38 0.75 30.6 50.04 183.7 2.09 3.96 0.96 0.63 1.45 2.13 0.6 1.66 0.92 35.13 2.72 10.57 23.61 0.73 0.5 0.8 1.21 0.51 11.93 0.9 1.02 27.74 10.38 32.67 22.91 1.4 0.95 4.0 1.34 2.42 1.84 11.2 2.84 10.53 2.57 4.03 0.59 30.72 0.63 0.97 5.56 0.74 7.17 51.1 14.43 1.47 7.07 0.07 1.98 8.87 1.22 1.61 0.45 6.62 0.74 5.12 7.33 1.85 14.68 3.52 8.83 6.23 1.19 9.39 16.43 5.62 2.02 1.19 4.84 0.68 4.97 0.77 2.99 1.48 2.24 1.6 3.92 5.06 1.75 32.17 2.85 6.55 311.55 13.32 29.74 2.21 1.04 1.59 3.96 4.11 0.54 9.32 9.48 3.39 8.69 0.72 17.86 1.44 0.81 3.69 0.81 0.92 5.05 1.87 0.74 0.35 113.35 25.65 2.52 1.0 2.16 1.36 30.22 101.08 2.24 4.65 12.77 2.27 37.87 17.82 8.23 0.76 54.69 2.34 0.31 1.87 3.37 0.62 1.03 28.82 1.03 17.35 0.78 1.7 7.52 8.37 30.66 1.68 1.68 1.34 2.91 17.09 2.79 5.21 2.71 2.21 1.06 1.29 67.59 1.69 2.36 1.04 13.41 0.84 1.0 4.73 0.87 2.07 0.92 1.21 1.98 9.61 4.83 6.83 39.35 0.85 1.91 2.9 10.69 1.42 2.19 1.29 56.65 0.56 5.73 0.27 0.46 11.56 3.17 4.82 0.98 1.11 3.31 7.09 1.8 3.03 0.64 1.94 5.84 2.34 3.31 14.07 10.77 22.58 2.1 0.54 
4.17 0.81 123.82 3.47 35.76 0.62 0.43 150.55 32.87 8.36 2.76 1.52 8.48 4.72 17.71 2.05 2.84 0.73 1.21 4.81 2.82 2.0 3.36 1.17 2.45 0.56 3.57 1.56 14.08 3.93 4.18 233.27 0.22 8.56 2.05 1.24 0.53 3.36 49.84 5.35 4.1 2.64 2.17 28.82 7.7 3.02 0.44 7.6 21.66 24.81 4.24 19.68 7.78 3.81 3.73 13.4 10.14 6.06 3.18 2.35 99.53 0.77 3.8 4.12 13.42 12.03 1.47 1.05 6.45 69.98 504.94 1.81 3.0 14.76 7.06 2.28 1.14 4.35 0.84 2.14 1.08 4.62 115.54 3.31 1.64 1.76 2.91 148.09 94.85 0.98 1.79 2.78 2.31 0.93 0.28 0.8 3.61 1.41 9.15 1.27 6.24 0.86 8.15 2.23 0.4 4.5 0.89 9.79 14.32 19.39 0.78 1.02 2.71 45.18 177.82 3.33 8.36 0.7 0.69 3.84 203.35 2.17 0.69 1.22 0.63 21.12 2.25 2.19 3.82 29.15 4.78 4.67 10.53 7.93 0.98 6.84 3.07 45.21 3.54 22.56 10.4 50.91 2.57 68.99 39.33 7.52 8.28 2.9 0.7 9.24 9.38 2.2 1.59 0.55 2.36 22.37 5.32 0.31 5.74 3.94 1.16 5.2 12.16 0.45 11.83 1.63 1.6 76.23 432.41 2.88 1.88 0.51 15.99 0.61 0.74 70.8 4.5 1.42 1.96 109.96 1.35 2.99 1.59 3.33 2.6 9.05 0.72 1.26 9.5 2.75 0.56 0.42 1.16 2.73 23.54 1.57 1.6 1.08 12.09 0.54 138.51 0.78 9.29 11.57 11.02 1.01 1.4 0.53 9.36 0.72 1.15 0.56 2.69 12.15 1.97 10.44 23.45 0.73 0.63 589.71 1.17 1.18 0.78 23.45 37.21 1.56 5.34 3.2 0.35 12.6 4.65 3.23 2.48 3.27 24.27 0.63 13.02 1.56 11.82 2.88 21.33 3.55 11.21 1.36 1.32 3.77 3.08 3.08 0.84 5.93 1.82 4.26 2.55 2.07 5.14 3.21 9.21 2.5 35.12 0.62 5.03 1.16 0.58 1.18 1.98 0.4 0.32 18.73 2.36 1.88 6.97 1.89 227.08 1.7 2.87 3.47 0.59 1.46 2.19 0.64 2.06 2.41 0.36 1.54 0.48 17.43 1.05 1.46 1.53 12.26 2.51 1.66 99.07 5.6 5.83 4.5 2.22 1.86 87.19 33.95 5.85 10.51 1.48 2.9 2.21 74.29 0.88 5.77 2.22 6.78 1.65 2.33 3.41 18.54 1.32 2.62 0.95 1.97 1.98 6.47 0.75 9.14 2.31 0.76 1.72 4.18 0.39 32.35 4.1 17.52 0.84 6.63 8.03 1.14 3.49 1.23 11.27 92.66 0.96 1.1 3.42 0.6 1.25 2.13 0.76 0.99 2.56 91.74 0.83 0.38 11.11 38.32 2.16 3.19 411.22 29.43 4.37 6.58 10.77 3.17 4.15 2.12 2.18 2.34 6.97 0.89 0.62 26.5 0.91 20.04 1.9 2.88 2.11 1.33 8.72 1.09 2.39 1.04 1.75 2.71 14.68 3.66 6.67 15.25 6.47 1.72 7.86 7.36 9.6 5.23 2.02 1.82 2.93 12.19 6.89 2.07 65.18 4.26 1.77 0.94 82.34 242.68 0.35 0.89 10.36 2.15 12.72 27.82 6.68 11.08 6.23 9.97 0.62 35.65 6.12 1.35 3.06 12.26 1.56 2.33 22.61 522.42 964.06 5.81 2.6 0.38 23.04 25.42 0.64 25.21 3.61 3.31 0.76 3.6 4.22 2.86 5.02 2.11 4.31 1.57 4.11 1.77 7.78 15.67 332.41 9.47 112.63 11.47 1.61 4.95 2.05 1.19 1.43 1.51 4.93 2.29 0.46 5.2 1.25 2.45 0.79 18.62 19.04 1.64 74.67 17.11 1.64 12.19 3.92 2.82 1.58 1.18 7.68 2.0 6.03 2.07 11.0 9.13 1.88 0.16 1.71 1.86 0.4 0.83 59.97 0.87 1.38 4.63 7.45 1.31 0.71 1.45 17.11 9.59 1.14 1.93 1.91 1.65 3.8 0.45 3.68 13.79 2.09 39.22 8.1 56.76 1.47 2.85 0.75 3.85 4.3 6.89 1.72 5.17 79.28 2.42 2.72 2.41 0.29 0.82 0.66 0.88 3.34 12.78 13.26 1.18 7.27 5.04 0.72 2.26 1.03 1.07 0.85 1.81 6.51 0.97 12.52 4.36 7.49 6.42 10.32 5.85 1.89 198.26 1.73 7.93 9.59 1.58 2.33 59.19 2.81 3.8 2.61 8.0 2.39 1.92 2.39 8.12 2.1 1.75 12.09 5.54 0.72 2.17 7.09 83.98 5.38 9.79 1.09 2.19 2.31 8.05 2.39 54.41 9.54 2.05 4.18 6.36 2.97 5.61 1.56 2.19 1.06 2.74 2.29 11.42 1.37 10.65 29.03 1.53 1.11 0.98 1.86 2.09 0.65 0.81 18.24 2.68 10.46 8.0 14.53 3.61 7.06 0.95 0.79 1.15 4.32 1.72 6.83 7.26 1.4 1.49 11.17 0.94 144.56 28.11 0.74 3.88 3.52 4.89 4.62 8.8 0.71 10.18 5.5 0.81 19.98 1.54 11.42 4.77 14.25 1.77 3.3 3.59 2.51 5.57 2.09 1.55 98.8 1.93 22.5 1.16 0.51 4.16 5.65 6.93 7.04 8.96 1.87 132.96 7.7 7.88 54.35 3.39 9.36 3.52 2.37 1.12 0.32 2.03 21.2 0.2 12.45 1.91 1.02 15.42 1.8 2.97 307.46 7.12 0.46 1.75 4.31 1.57 24.92 6.14 2.95 0.33 13.73 0.62 0.74 3.97 0.2 15.72 1.51 1.82 146.29 
0.65 14.94 10.72 2.76 28.63 10.91 2.49 9.59 0.23 1.18 2.16 3.09 3.33 0.98 198.18 2.23 1.01 0.9 2.5 22.71 0.85 118.79 0.46 4.05 4.79 1.9 1.9 4.04 1.72 1.35 1.68 7.49 0.96 98.32 26.67 8.43 10.86 0.87 9.16 58.09 1.71 0.92 3.33 2.0 2.49 2.11 7.13 202.57 8.83 2.49 14.62 0.78 2.89 0.61 409.27 561.15 11.72 0.64 1.29 31.7 2.09 12.85 2.11 1.58 1.7 1.8 2.72 679.33 1.55 8.83 132.02 220.41 265.84 0.85 0.8 3.31 9.92 0.9 2.4 11.8 1.83 11.08 2.68 1.25 317.51 1.07 4.6 1.27 11.61 3.64 5.43 0.77 73.39 12.8 6.01 11.69 73.39 1.28 0.47 1.1 11.0 2.5 134.02 1.02 1.64 8.45 23.5 1.05 1.69 26.39 2.19 1.1 0.69 1.62 1.19 1.24 1.35 1.0 1.32 10.3 53.66 4.06 33.99 1.29 1.18 1.1 5.14 1.67 1.11 0.55 19.02 8.66 1.36 1.09 1.96 3.02 12.39 7.26 6.65 22.52 1.3 4.47 1.94 2.9 0.71 0.35 1.7 1.61 5.34 125.14 1.57 1.53 3.35 0.51 0.81 11.98 1.37 2.77 11.15 3.95 1.09 36.25 1.25 2.81 2.92 6.35 3.54 6.8 0.95 1.24 20.95 4.0 1.21 2.54 6.19 1.76 2.91 7.27 74.44 2.14 10.36 2.16 28.23 1.74 88.37 11.63 1.41 4.16 10.52 1.62 26.27 8.27 7.66 97.92 21.15 20.62 0.23 0.55 1.97 69.96 7.85 8.58 2.06 4.07 0.77 10.83 0.78 54.02 0.68 3.81 0.34 1.49 2.03 0.53 0.9 3.3 0.37 2.19 1.97 2.06 2.64 88.83 2.77 4.48 0.95 0.99 1.92 1.31 11.93 2.68 2.36 13.81 21.34 1.17 2.66 0.12 7.34 4.89 1.74 4.98 0.65 4.53 13.76 17.66 3.22 0.66
###Markdown
Okay, so we have one AnnData object we can use with scanpy. __But what if we want a second one?__ Let's go through the steps again, this time with a different `.results` file. We can use the same query to get a bunch of analysis bundles, but this time get our `.results` file from the _second_ bundle.
###Code
the_second_bundle = bundles['results'][1]
bundle_files = the_second_bundle['metadata']['manifest']['files']
for f in bundle_files:
if f['name'].endswith('.results'):
results_file_uuid, results_file_version = f['uuid'], f['version']
###Output
_____no_output_____
###Markdown
With the new uuid, we can get the `.results` file itself.
###Code
results2 = client.get_file(replica='aws', uuid=results_file_uuid, version=results_file_version)
open('matrix2.results', 'w').write(results2.decode("utf-8"))
###Output
Waiting 10s before redirect per Retry-After header
###Markdown
Again, let's take the data we want out of the results file and store it into a tsv file...
###Code
with open('matrix2.results', 'r') as infile, open('matrix2.tsv', 'w', newline='') as outfile:
reader = csv.DictReader(infile, delimiter='\t')
writer = csv.DictWriter(outfile, fieldnames=['gene_id', 'TPM'], delimiter='\t')
for row in reader:
writer.writerow({'gene_id': row['gene_id'], 'TPM': row['TPM']})
###Output
_____no_output_____
###Markdown
...and then create another AnnData object & print it.
###Code
adata2 = sc.read_csv(filename='matrix2.tsv', delimiter='\t').transpose()  # read the second matrix, not the first one
print(adata2)
for i in range(0, 153):
print( '{:<6}'.format('{:.1f}'.format(adata2.X[i])), end='' if (i + 1) % 17 != 0 else '\n' )
###Output
Observation names are not unique. To make them unique, call `.obs_names_make_unique`.
Variable names are not unique. To make them unique, call `.var_names_make_unique`.
|
notebooks/001-run-randomforest.ipynb | ###Markdown
Urination
###Code
CATEGORY = "Urination"
dataset_config = config.DATASET_CONFIG[CATEGORY]
complete_ids = load_annotation.get_complete_ids(
category = CATEGORY
)
selected_ids = complete_ids[:60]
TRAIN_IDS, TEST_IDS = train_test_split(selected_ids, seed=1234)
print(f"Category: {CATEGORY}")
print(f"Training {len(TRAIN_IDS)} use_ids: {TRAIN_IDS[:5]}...")
print(f"Testing {len(TEST_IDS)} use_ids: {TEST_IDS[:5]}...")
train_config = dataset_config.copy()
test_config = dataset_config.copy()
train_config['USER_IDS'] = TRAIN_IDS
test_config['USER_IDS'] = TEST_IDS
dataset = {}
dataset['train'] = make_dataset.RandomForestExtended(train_config)
dataset['test'] = make_dataset.RandomForestExtended(test_config)
# it may take around 20min to run
train_x, train_y = dataset['train'].get_features_and_labels_from_users()
test_x, test_y = dataset['test'].get_features_and_labels_from_users()
print(f"train_x.shape = {train_x.shape}, test_x.shape = {test_x.shape}")
print(f"#positive/#total train_y = {sum(train_y)}/{len(train_y)}")
print(f"#positive/#total test_y = {sum(test_y)}/{len(test_y)}")
rf = RandomForestClassifier(n_estimators = 30)
rf.fit(train_x, train_y)
classification_result(
rf,
test_x, test_y,
threshold = 0.3
)
classification_result(
rf,
test_x, test_y,
threshold = 0.2
)
variable_importance(train_x, rf)
current_time = date.today().strftime("%Y-%m-%d")
model_name = f"../models/urination-rf-extended-embedding-{current_time}.pkl"
with open(model_name, "wb") as f:
pickle.dump(rf, f)
###Output
_____no_output_____
###Markdown
Defecation
###Code
CATEGORY = "Defecation"
dataset_config = config.DATASET_CONFIG[CATEGORY]
complete_ids = load_annotation.get_complete_ids(
category = CATEGORY
)
selected_ids = [idx for idx in complete_ids if idx <= 1950 and idx >= 1800]
TRAIN_IDS, TEST_IDS = train_test_split(selected_ids)
print(f"Category: {CATEGORY}")
print(f"Training {len(TRAIN_IDS)} use_ids: {TRAIN_IDS[:5]}...")
print(f"Testing {len(TEST_IDS)} use_ids: {TEST_IDS[:5]}...")
train_config = dataset_config.copy()
test_config = dataset_config.copy()
train_config['USER_IDS'] = TRAIN_IDS
test_config['USER_IDS'] = TEST_IDS
dataset = {}
dataset['train'] = make_dataset.RandomForestExtended(train_config)
dataset['test'] = make_dataset.RandomForestExtended(test_config)
train_x, train_y = dataset['train'].get_features_and_labels_from_users()
test_x, test_y = dataset['test'].get_features_and_labels_from_users()
print(f'train_x.shape: {train_x.shape} test_x.shape: {test_x.shape}')
print(f'No. Positive in training {train_y.sum()}/{train_y.shape}')
print(f'No. Positive in testing {test_y.sum()}/{test_y.shape}')
rf = RandomForestClassifier(
n_estimators = 10,
class_weight = "balanced"
)
rf.fit(train_x, train_y)
classification_result(
rf,
test_x, test_y,
threshold = 0.3
)
classification_result(
rf,
test_x, test_y,
threshold = 0.4
)
variable_importance(train_x, rf)
current_time = date.today().strftime("%Y-%m-%d")
model_name = f"../models/defecation-rf-extended-embedding-{current_time}.pkl"
with open(model_name, "wb") as f:
pickle.dump(rf, f)
###Output
_____no_output_____ |
pytorch-implementation/model-training.ipynb | ###Markdown
Setup Create FilesystemThis notebook is primarily meant to be executed in Colab as a computational backend. If you want to run on your own hardware with data, you need to set `data_dir` and `ALLOW_IO`.This notebook is viewable directly on Colab at [https://colab.research.google.com/github/rcharan/phutball/blob/master/pytorch-implementation/model-training.ipynb](https://colab.research.google.com/github/rcharan/phutball/blob/master/pytorch-implementation/model-training.ipynb) (it is a mirror of the github repo). But if it has moved branches or you are looking at a past commit, look at the [Google instructions](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb) on where to find this file.The workflow is: - Data stored in (my personal/private) Google Drive - Utilities/library files (for importing) on github, edited on local hardware and pushed to github. - Notebook hosted on github, edited either in Colab or locally (depending on the relative value of having a GPU attached versus being able to use regular Jupyter keyboard shortcuts/a superior interface)
###Code
# Attempt Colab setup if on Colab
try:
import google.colab
except:
ALLOW_IO = False
else:
# Mount Google Drive at data_dir
# (for data)
from google.colab import drive
from os.path import join
ROOT = '/content/drive'
DATA = 'My Drive/phutball'
drive.mount(ROOT)
ALLOW_IO = True
data_dir = join(ROOT, DATA)
!mkdir "{data_dir}" # in case we haven't created it already
# Pull in code from github
%cd /content
github_repo = 'https://github.com/rcharan/phutball'
!git clone -b master {github_repo}
%cd /content/phutball
# Point python to code base
import sys
sys.path.append('/content/phutball/pytorch-implementation')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/drive
mkdir: cannot create directory ‘/content/drive/My Drive/phutball’: File exists
/content
Cloning into 'phutball'...
remote: Enumerating objects: 259, done.[K
remote: Counting objects: 100% (259/259), done.[K
remote: Compressing objects: 100% (165/165), done.[K
remote: Total 2243 (delta 175), reused 151 (delta 92), pack-reused 1984[K
Receiving objects: 100% (2243/2243), 38.58 MiB | 39.51 MiB/s, done.
Resolving deltas: 100% (1426/1426), done.
/content/phutball
###Markdown
Imports
###Code
# !git pull
%%capture
%load_ext autoreload
%autoreload 2
import os
import gc
import numpy as np
import pandas as pd  # used below to tabulate the fit-one-cycle (lr, loss) results
# Codebase
from lib.models.model_v3 import TDConway
from lib.off_policy import EpsilonGreedy
from lib.optim import AlternatingTDLambda
from lib.training import training_loop
from lib.utilities import config, lfilter, Timer, product
# from lib.testing_utilities import create_state, visualize_state, boards
# (note: create_state is needed by the Profile section below -- uncomment this import to run it)
from lib.move_selection import get_next_move_training
from lib.pretraining.fit_one_cycle import fit_one_cycle
from lib.pretraining.pre_training import pre_train
from torch.optim import SGD
from lib.arena import Player, Battle, RandoTron
# Time Zone management utilities
from datetime import datetime
from pytz import timezone
eastern = timezone('US/Eastern')
# Graphics for visualization
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
plt.ioff()
###Output
_____no_output_____
###Markdown
Device Management Utilities

Setup for GPU, CPU, or (not working well/fully implemented) TPU.
###Code
import os
def install_tpu():
VERSION = "1.5"
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --version $VERSION
use_tpu = 'COLAB_TPU_ADDR' in os.environ
import torch
if use_tpu:
# Install PyTorch/XLA
install_tpu()
import torch_xla
import torch_xla.core.xla_model as xm
# Set the device
device = xm.xla_device()
# Memory inspection
def print_memory_usage():
print('TPU memory inspection not implemented')
def print_max_memory_usage():
print('TPU memory inspection not implemented')
def garbage_collect():
gc.collect() # No TPU specific implementation yet
elif torch.cuda.is_available():
# Set the device
device = torch.device('cuda')
# Echo GPU info
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
print(gpu_info)
# Memory inspection and management
from lib.memory import (
print_memory_usage_cuda as print_memory_usage,
print_max_memory_usage_cuda as print_max_memory_usage,
garbage_collect_cuda as garbage_collect
)
else:
# Set the device to CPU
device = torch.device('cpu')
# Echo RAM info
from psutil import virtual_memory
from lib.memory import format_bytes
ram = virtual_memory().total
print(format_bytes(ram), 'available memory on CPU-based runtime')
# Memory inspection and management
from lib.memory import (
print_memory_usage,
print_max_memory_usage,
garbage_collect
)
###Output
Sat Jul 18 18:55:54 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 450.51.05 Driver Version: 418.67 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 34C P0 25W / 250W | 10MiB / 16280MiB | 0% Default |
| | | ERR! |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Utilities
###Code
def save(fname, model):
state_dict = {
'model' : model.state_dict(),
}
torch.save(state_dict, f'{data_dir}/{fname}.pt')
def fname(version, game_num):
return f'v{version}-{game_num}'
def load(version, game_num, model):
sd = torch.load(f'{data_dir}/{fname(version, game_num)}.pt')
model.load_state_dict(sd['model'])
return model
###Output
_____no_output_____
###Markdown
Training

Fit one cycle
###Code
model = TDConway(config).to(device)
data = fit_one_cycle(model)
df = pd.DataFrame(data, columns = ['lr', 'loss'])
df['loss'] = df.loss.apply(lambda l : l.item())
df = df.groupby('lr').mean().reset_index()
fig, ax = plt.subplots()
sns.lineplot(x = 'lr', y='loss', data = df, ax = ax)
ax.set_xscale('log', basex = 10)
ax.set_yscale('log', basey = 10)
fig
###Output
_____no_output_____
###Markdown
Pretraining
###Code
model = TDConway(config).to(device)
optimizer = SGD(model.parameters(), lr = 0.05)
pre_train(model, optimizer, loops = 40000, batch_size = 300)
version = '0.5.1'
game_num = 'pre40000x300'
save(fname(version, game_num), model)
###Output
_____no_output_____
###Markdown
Profile
###Code
initial_state = create_state('H10').to(device)
epsilon_greedy = EpsilonGreedy(0.01)
def num_params(model):
return sum(
product(t.shape) for t in model.parameters()
)
# version = '0.3.1'
# game_num = '70000'
model = TDConway(config, temperature = 0.1).to(device)
model = load(version, game_num, model)
optimizer = AlternatingTDLambda(model.parameters(), alpha = 0.05, lamda = 0.9)
print(f'{num_params(model):,d} parameters')
%timeit get_next_move_training(initial_state, model, device)
%timeit training_loop(model, optimizer, 1, device, off_policy = epsilon_greedy)
# %prun training_loop(model, optimizer, 1, device, off_policy = epsilon_greedy)
###Output
1,935,105 parameters
100 loops, best of 3: 3.28 ms per loop
1/1 [==============================] - 0s 622us/step
1/1 [==============================] - 0s 188us/step
1/1 [==============================] - 0s 192us/step
1/1 [==============================] - 0s 172us/step
1 loop, best of 3: 473 ms per loop
###Markdown
Train
###Code
epsilon_greedy = EpsilonGreedy(0.05)
model = TDConway(config, temperature = 0.1).to(device)
optimizer = AlternatingTDLambda(model.parameters(), 0.05, 0.9)
version = '0.5.1'
game_num = 'pre40000x300'
load(version, game_num, model);
game_num = 0
batch_size = 500
while True: # Until Colab or User disconnects out of boredom
try:
training_loop(model, optimizer, batch_size, device,
off_policy = epsilon_greedy,
verbose = 1,
initial = 0.1)
game_num += batch_size
save(fname(version, game_num), model)
print(f'Finished {game_num} games at', datetime.now(eastern).strftime('%I:%M%p %Z'))
except KeyboardInterrupt:
break
# Versioning:
# Major versions - major change in approach
# Minor versions - incompatible architecture tweaks
# Build - retraining or changes in training parameters
# Game number - number of games trained or pre{E}x{B} where E is the the
# number of batches and B is the batch size for pre-training
# Example: v0.1.2 @400 is the second attempt at training the v0.1 architecture
# and was trained for 400 games
# Performance benchmarks.
# GPU benchmarks are on a P100 unless otherwise stated
# per move : training-relevant
# forward pass: evaluation (arena mode) relevant
# CPU benchmarks are for inference on a fixed set of 300 randomly
# generated boards on an Intel i5 chipset. (deployment-relevant)
# Memory consumption has not been an issue
# v0.1: architecture from model_v1. Training: Alternating TD(λ)
# ~60M params (59,943,809)
# GPU: 100–110ms/move with 50-60ms for forward pass
# CPU: 8.1s ± 0.5s
# alpha = 0.01, lambda = 0.9, epsilon = 0.1
#
# - v0.1.1 - don't use, bug in training
# - v0.1.2 - Use. available @400. Win rate v RandoTron 51% (😢)
# v0.2: architecture from model_v2. Smaller Residual ConvNet (17 layers)
# Training: Alternating TD(λ) WITH pretraining to prefer the ball to the
# right on randomly generated boards
# ~4.4M params (4,381,505)
# GPU: 30-35ms/move with ~12ms for forward pass
# CPU: 1.1s ± 0.02s
#
# - v0.2.1 - Available @pre10000x300, @400, @1500, @3500
# - Hyperparameters same as v0.1
# Win rate v RandoTron
# - @pre-trained: 75.4% ±1.4% (!?)
# - @400: 49%
# - @1500: 56.9% ±1.8%
# - @3500: 54.8% ±1.7%
# - In further increments of 500 as [4000, 4500, ..., 20500]
# - @10500: 60.3% ±1.6%
# - @17500: 59.5% ±1.2%
# - v0.2.2 - Increased pretraining, epsilon = 0.01
# - Available @pre30000x300 (71.2% ± 2.3%)
# - And in increments of 500 [500, 1000, ..., 82000]
# - @17500: 71.5% ±1.1%
# - @82000: 99.8% ±0.1%
# v0.3: architecture from model_v3. Much smaller (7 layers), no residuals
# ~1.9M params (1,935,105)
# alpha = 0.05, epsilon = 0.1, lambda = 0.9
#
# -v0.3.1 - Available at @pre40000x300, increments of 1000
# - @27000: 71.3% ±1.1%
# - @73000: 58.8%
# -v0.3.2 - With ε = 0.05
###Output
_____no_output_____
###Markdown
Evaluate
###Code
# Player 1
model = TDConway(config).to(device)
version = '0.5.1'
game_num = 4000
sd = torch.load(f'{data_dir}/{fname(version, game_num)}.pt', map_location = device)
model.load_state_dict(sd['model'])
td_conway = Player(model, name = f'TD Conway v{version} @{game_num}')
td_conway.eval()
randotron = RandoTron()
battle = Battle(td_conway, randotron, verbose = 1)
battle.play_match(1600, device)
# Player 1 @3500
model = TDConway(config).to(device)
version = '0.2.1'
game_num = 3500
sd = torch.load(f'{data_dir}/{fname(version, game_num)}.pt', map_location = device)
model.load_state_dict(sd['model'])
td_conway_1 = Player(model, name = f'TD Conway v{version} @{game_num}')
td_conway_1.eval()
# Player 2 @ 10500
model = TDConway(config).to(device)
version = '0.2.1'
game_num = 10500
sd = torch.load(f'{data_dir}/{fname(version, game_num)}.pt', map_location = device)
model.load_state_dict(sd['model'])
td_conway_2 = Player(model, name = f'TD Conway v{version} @{game_num}')
td_conway_2.eval()
battle = Battle(td_conway_1, td_conway_2, verbose = 1)
battle.play_match(900, device)
###Output
_____no_output_____ |
Part 1 - questionnaire scales - 2014-11.ipynb | ###Markdown
Search & display functions
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# using this for inline documentation so that it's clear
# that the printing statement isn't part of the necessary
# transformation code.
def html_print(df):
from IPython.display import HTML
try:
out = df.to_html()
except AttributeError:
out = pd.DataFrame(df).to_html()
return HTML(out)
def htmljoin(df_list, delimiter=''):
from IPython.display import HTML
return HTML(delimiter.join([x.to_html() for x in df_list]))
def col_matches(df, regex):
import re
cols = list(enumerate(df.columns))
matches = [c for (i, c) in cols
if re.findall(regex, c)]
return matches
def concat_matches(df, *args):
assert all([len(r) for r in args])
import re
col_match_lists = [col_matches(df, regex) for regex in args]
col_set = [df[matches] for matches in col_match_lists]
if len(col_set) == 0:
return None
elif len(col_set) == 1:
return col_set[0]
else:
return pd.concat(col_set, axis=1)
def show_frames(frame_list, delimiter=''):
from IPython.display import HTML
if len(frame_list) == len(delimiter):
html_out = ""
item_template = '<p><strong>{}</strong></p>{}<br>'
for i, tup in enumerate(zip(frame_list, delimiter)):
frame = tup[0]
tag = tup[1]
html_out += item_template.format(tag, frame.to_html())
return HTML(html_out)
else:
html_out = [df.to_html() for df in frame_list]
return HTML(delimiter.join(html_out))
def compare_transformations(df, columns, functions, **kwargs):
print('raw')
df[columns].hist(**kwargs)
plt.show()
for name, func in functions.items():
print(name)
df[columns].apply(func).hist(**kwargs)
plt.show()
def quickcompare(r, size=(15,7)):
return compare_transformations(dfo, col_matches(dfo, r),
{'log1p': np.log1p,
'sqrt': np.sqrt, },
figsize=size)
###Output
_____no_output_____
###Markdown
Load data and set participant ID index
###Code
dfo = pd.read_csv(input_scales_csv,
index_col='pid',
)
def add_leading_zeroes(pid):
    # left-pad the participant id to 3 characters (e.g. '53' -> '053');
    # equivalent to str(pid).zfill(3) for ids of up to 3 digits
    no_leading_zero = str(pid)
    with_leading_zeroes = '000'[len(no_leading_zero):] + no_leading_zero
    return with_leading_zeroes
dfo.index = dfo.reset_index('pid').pid.apply(add_leading_zeroes)
dfo[::12].T.head()
###Output
_____no_output_____
###Markdown
Missing values
###Code
#show missing values from original CSV entry ('77777')
html_print(dfo.loc['053'].head(10)) #example P with missing WASI subtest
# Replace NaNs in note-taking columns with blanks for readability
def note_columns():
note_columns = col_matches(dfo, "notes")
assert len(note_columns) == 25
more_note_columns = ['qbasic_ethnicityother',
'qmusic_dancestyle',
'qmusic_drumstyles',
'qmusic_instrumentlist',
'qmusic_gamenames']
assert set(more_note_columns).issubset(dfo.columns)
note_columns += more_note_columns
assert len(note_columns) == 30
return note_columns
dfo.loc[:, note_columns()] = dfo.loc[:, note_columns()].fillna("")
# Replace missing data (coded "77777") with NaNs
# (but will change this back before exporting for external analyses)
dfo.replace('77777', np.nan, inplace=True)
html_print(dfo[30:40].T[18:22])
# View missing values
htmljoin([pd.DataFrame(row[row.isnull()])[:7].T
for i, row in dfo.iterrows()
if len(row[row.isnull()]) > 0])
###Output
_____no_output_____
###Markdown
Transforming questionnaire items

Quantiles and dichotomizing
###Code
simple_hour_items = col_matches(dfo, 'hours$') # '$' --> matches only end of name
simple_hour_items
# Extreme floor effect for "practice hours" items when viewing
# the overall sample, so:
# Zero or nonzero monthly practice hours?
hour_counts_A = ['qmusic_singinghours',
'qmusic_singingtimes',
'qmusic_dancehours',
'qmusic_instrumenthours',
'qmusic_drumhours',
'qmusic_behaviors_09_danceprv',
'qmusic_behaviors_10_dancepub',
'qmusic_gamehoursall',
'qmusic_gamehoursdrumsticks',
]
for varname in hour_counts_A:
s = dfo[varname]
is_nonzero = (s > 0) # --> False, True
dfo[varname + '_nonzero'] = is_nonzero.astype(int) # --> 0, 1
#pleasant matplotlib style set by pandas library
pd.options.display.mpl_style = 'default'
print('raw distributions:')
dfo[hour_counts_A].hist(figsize=(10,10))
plt.show()
print(concat_matches(dfo, '_nonzero$').head(4).T)
pos_skewed_vars = ['qmusic_behaviors_07_yourself',
'qmusic_behaviors_08_otherprs',
'qmusic_behaviors_09_danceprv',
'qmusic_dancelevel']
compare_transformations(dfo, pos_skewed_vars,
{'log1p': np.log1p,
'sqrt': np.sqrt,},
figsize=(15,7))
for varname in pos_skewed_vars:
s = dfo[varname]
dfo[varname + '_ln1p'] = np.log1p(s) #ln of (x + 1)
from functools import partial
medsplit = partial(pd.qcut, q=2, labels=False)
compare_transformations(dfo,
['qmusic_dancelevel'],
{'medsplit': medsplit},
figsize=(10,3))
dfo['qmusic_dancelevel_tophalf'] = medsplit(dfo['qmusic_dancelevel'])
# same as recoding as (0-1) --> 0
# (2-7) --> 1
compare_transformations(dfo,
col_matches(dfo, 'rel_totalmonths'),
{'log1p': np.log1p,
'sqrt': np.sqrt, },
figsize=(10,3))
quickcompare('times$', size=(5,3))
#zero-indexed order of tasks of this ISI among the two types
dfo['orders_500'] = 1 - dfo.order_500ms_first
dfo['orders_800'] = 0 + dfo.order_500ms_first
# entered as strings in session_taskorder:
# 1. Iso, Lin, Phase
# 2. Iso, Phase, Lin
# 3. Lin, Iso, Phase
# 4. Lin, Phase, Iso
# 5. Phase, Iso, Lin
# 6. Phase, Lin, Iso
# first character (number indicating overall order)
order_n = dfo['session_taskorder'].apply(lambda x: int(x[0]))
#zero-indexed order of tasks of this type among the three types
isochronous_placement = {1: 0, 2: 0, 3: 1,
4: 2, 5: 1, 6: 2,}
phaseshift_placement = {1: 2, 2: 1, 3: 2,
4: 1, 5: 0, 6: 0,}
linearchange_placement = {1: 1, 2: 2, 3: 0,
4: 0, 5: 2, 6: 1,}
dfo['orders_iso'] = order_n.apply(lambda x: isochronous_placement[x])
dfo['orders_phase'] = order_n.apply(lambda x: phaseshift_placement[x])
dfo['orders_linear'] = order_n.apply(lambda x: linearchange_placement[x])
dfo['orderc_iso_before_lin'] = (dfo.orders_iso < dfo.orders_linear).astype(int)
dfo['orderc_iso_before_phase'] = (dfo.orders_iso < dfo.orders_phase).astype(int)
dfo['orderc_phase_before_lin'] = (dfo.orders_phase < dfo.orders_linear).astype(int)
# Set up precice ordering of each task in the set (1-indexed)
# practice trials of isochronous/single-stimulus
dfo['order_iso5t1'] = dfo.orders_500 + 1
dfo['order_iso8t1'] = dfo.orders_800 + 1
# +1 to set from zero-indexed to one-indexed,
# then +2 because the the first two have passed (iso5t1 and iso5t2)
dfo['order_iso5t2'] = 2 + (2 * dfo.orders_iso) + dfo.orders_500 + 1
dfo['order_iso8t2'] = 2 + (2 * dfo.orders_iso) + dfo.orders_800 + 1
dfo['order_psh5t'] = 2 + (2 * dfo.orders_phase) + dfo.orders_500 + 1
dfo['order_psh8t'] = 2 + (2 * dfo.orders_phase) + dfo.orders_800 + 1
dfo['order_lin5t'] = 2 + (2 * dfo.orders_linear) + dfo.orders_500 + 1
dfo['order_lin8t'] = 2 + (2 * dfo.orders_linear) + dfo.orders_800 + 1
dfo['order_iso5j'] = 6 + dfo['order_iso5t2'] #already adjusted to 1-index
dfo['order_iso8j'] = 6 + dfo['order_iso8t2']
dfo['order_psh5j'] = 6 + dfo['order_psh5t']
dfo['order_psh8j'] = 6 + dfo['order_psh8t']
dfo['order_lin5j'] = 6 + dfo['order_lin5t']
dfo['order_lin8j'] = 6 + dfo['order_lin8t']
dfo['order_isip5'] = dfo['order_iso5t1'] + 2 + 6 + 6
dfo['order_isip8'] = dfo['order_iso8t1'] + 2 + 6 + 6
html_print(concat_matches(dfo, '^order|taskorder')[::20].T.sort())
#Looking at high-end outliers, no transformations done here
hours_col_names = col_matches(dfo, 'hours')
hours_col_names += col_matches(dfo, 'behaviors_07')
hours_col_names += col_matches(dfo, 'behaviors_08')
hours_col_names += col_matches(dfo, 'behaviors_09')
hours_col_names += col_matches(dfo, 'behaviors_10')
hours_cols = dfo[hours_col_names]
frames = [hours_cols.sort(c, ascending=False).head(8).T
for c in hours_cols.columns]
show_frames(frames, hours_col_names)
#log_hours_columns = np.log1p(hours_cols)
#log_hours_columns.sort(col)
quickcompare('^calc')
concat_matches(dfo, 'sumhours', 'qmusic_dance', 'iq').T
#Finally: set NaNs back to '77777'
dfo.replace(np.nan, '77777', inplace=True)
scales_output_updated = '2014-10-29a'
prefix = "c:/db_pickles/pickle - dfo-scales - "
import cPickle as pickle
output_file= prefix + scales_output_updated + '.pickle'
pickle.dump(dfo, open(output_file, "wb"))
# Proceed with pickle to Part 5
###Output
_____no_output_____ |
sentence_embeddings_examples.ipynb | ###Markdown
###Code
!pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('bert-base-nli-mean-tokens')
sentences = ['This framework generates embeddings for each input sentence',
'Sentences are passed as a list of string.',
'The quick brown fox jumps over the lazy dog.']
sentence_embeddings = model.encode(sentences)
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", len(embedding), embedding[:10])
print("")
sentence_embeddings = model.encode([
'Мама мыла раму'
])
rus_sentences = ['мама мыла раму', 'рама мыла маму']
rus_sentences_embeddings = model.encode(rus_sentences)
for sentence, embedding in zip(rus_sentences, rus_sentences_embeddings):
print("Sentence:", sentence)
print("Embedding:", len(embedding), embedding[:10])
print("")
tatar_sentences = ['Вахит Имамовның бу китабын экстремистик китап дип бәяләргә тырышып, тыю өчен суд-мәхкәмә эшләре бара.',
'Суд киләсе елда эшен дәвам итәчәк.',
'Ә әлегә документаль әсәр экспертиза уза.',
'Әлеге китапны ни сәбәпледер, мин дә үз вакытында укымый калганмын']
tat_sentences_embeddings = model.encode(tatar_sentences)
for sentence, embedding in zip(tatar_sentences, tat_sentences_embeddings):
print("Sentence:", sentence)
print("Embedding:", len(embedding), embedding[:10])
print("")
my_sentences = ['Mein Hund ist gross', 'Meine Katze ist gross', 'Mein Hund ist klein', 'Meine Katze ist klein']
my_sentence_embeddings = model.encode(my_sentences)
for sentence, embedding in zip(my_sentences, my_sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", len(embedding), embedding[:10])
print("")
###Output
_____no_output_____ |
experiments/hypersingular_accuracy.ipynb | ###Markdown
\begin{equation}\int_{S} K(x, y) y_2 dy\end{equation}

Ideas:
* there could be a bug in adaptive.hpp
* maybe recursive subdivision is better than gauss-kronrod for this type of problem.
* ~~kahan summation might be necessary. perhaps the adding and subtracting of the error causes problems?~~
* align the python numpy kernels with the nearfield.cpp kernels.
###Code
from tectosaur2.nb_config import setup
setup()
import numpy as np
from tectosaur2 import gauss_rule, integrate_term
from tectosaur2.mesh import unit_circle
from tectosaur2.laplace2d import hypersingular
from tectosaur2.global_qbx import global_qbx_self
quad_rule = gauss_rule(10)
circle = unit_circle(quad_rule)
circle.n_panels
gmats = []
gvecs = []
v = np.cos(circle.pts[:,1])
ps = np.arange(3, 8)
for p in ps:
gmat, report = global_qbx_self(hypersingular, circle, p, 1.0, 10, return_report=True)
gmats.append(gmat)
gvecs.append(gmat[:,:,:,0].dot(v))
lmat, lreport = integrate_term(hypersingular, circle.pts, circle, safety_mode=True, return_report=True)
lvec = lmat[:,:,:,0].dot(v)
print(np.max(np.abs(lvec - gvecs[-1])))
lmat, lreport = integrate_term(hypersingular, circle.pts, circle, return_report=True)
lvec = lmat[:,:,:,0].dot(v)
print(np.max(np.abs(lvec - gvecs[-1])))
for i in range(1, ps.shape[0]):
print('\n', i)
print(np.max(np.abs(gmats[i] - gmat[i - 1])))
print(np.max(np.abs(gvecs[i] - gvecs[i - 1])))
###Output
1
4.657536261689784
8.12064950692637e-05
2
5.704405858601769
7.705862049567358e-07
3
8.12835335669428
3.9674554669355544e-08
4
9.690547112513867
2.4950611021701263e-10
###Markdown
Analytic comparison

Let's use the analytic solution for the stress due to slip on a line segment in a fullspace extending from y = -1 to y = 1, from page 35 of the Segall book.
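For reference, these are the expressions implemented in `analytical_stress` in the next cell (transcribed directly from that code; the slip magnitude and elastic prefactors are normalized away here):

\begin{equation}
r_p^2 = x^2 + (y+1)^2, \qquad r_i^2 = x^2 + (y-1)^2,
\end{equation}

\begin{equation}
\sigma_{xz} = -\frac{1}{2\pi}\left(\frac{y+1}{r_p^2} - \frac{y-1}{r_i^2}\right), \qquad
\sigma_{yz} = \frac{1}{2\pi}\left(\frac{x}{r_p^2} - \frac{x}{r_i^2}\right).
\end{equation}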
###Code
import sympy as sp
import matplotlib.pyplot as plt
from tectosaur2 import panelize_symbolic_surface, pts_grid
t = sp.var('t')
fault = panelize_symbolic_surface(t, 0*t, t, quad_rule, n_panels=1)
def analytical_stress(obsx, obsy):
rp = obsx ** 2 + (obsy + 1) ** 2
ri = obsx ** 2 + (obsy - 1) ** 2
sxz = -(1.0 / (2 * np.pi)) * (((obsy + 1) / rp) - ((obsy - 1) / ri))
syz = (1.0 / (2 * np.pi)) * ((obsx / rp) - (obsx / ri))
return sxz, syz
def run(zoomx, zoomy):
nobs = 200
xs = np.linspace(*zoomx, nobs)
ys = np.linspace(*zoomy, nobs)
obs_pts = pts_grid(xs, ys)
obsx = obs_pts[:, 0]
obsy = obs_pts[:, 1]
sing = np.array([(0,-1), (0, 1)])
stress_mat, report = integrate_term(hypersingular, obs_pts, fault, singularities=sing, return_report=True)
interior_stress = stress_mat[:,:,:,0].sum(axis=2)
analytical_sxz, analytical_syz = analytical_stress(obsx, obsy)
interior_sxz = interior_stress[:,0]
interior_syz = interior_stress[:,1]
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sxz_err = np.log10(np.abs(interior_sxz - analytical_sxz))
syz_err = np.log10(np.abs(interior_syz - analytical_syz))
plt.figure(figsize=(12, 8))
plots = [
("analytical_sxz", "$\sigma_{xz}$"),
("analytical_syz", "$\sigma_{yz}$"),
("sxz_err", r"$\log_{10}|\sigma_{xz,\textrm{BIE}} - \sigma_{xz,\textrm{analytic}}|$"),
("syz_err", r"$\log_{10}|\sigma_{yz,\textrm{BIE}} - \sigma_{yz,\textrm{analytic}}|$")
]
for i, (k, title) in enumerate(plots):
plt.subplot(2, 2, 1 + i)
plt.title(title)
v = locals()[k].reshape((nobs, nobs))
v2d = v.reshape((nobs, nobs))
if i < 2:
levels = np.linspace(-0.5, 0.5, 11)
else:
levels = np.linspace(-14, -1, 14)
cntf = plt.contourf(xs, ys, v2d, levels=levels, extend="both")
plt.contour(
xs,
ys,
v2d,
colors="k",
linestyles="-",
linewidths=0.5,
levels=levels,
extend="both",
)
plt.colorbar(cntf)
# plt.xlim([-0.01, 0.01])
# plt.ylim([-0.02, 0.0])
plt.tight_layout()
plt.show()
# run([-2, 2], [-2, 2])
# run([-0.5, 0.5], [0.5, 1.5])
# run([-0.1, 0.1], [0.9, 1.1])
# run([-0.003, 0.003], [0.997, 1.003])
xs = np.linspace(-0.003, 0.003, 100)
ys = [1.001] * xs.shape[0]
obs_pts = np.array([xs, ys]).T.copy()
sing = np.array([(0,-1), (0, 1)])
stress_mat, report = integrate_term(hypersingular, obs_pts, fault, safety_mode=True, singularities=sing, return_report=True)
interior_stress = stress_mat[:,:,:,0].sum(axis=2)
analytical_sxz, analytical_syz = analytical_stress(obs_pts[:,0], obs_pts[:,1])
interior_sxz = interior_stress[:,0]
interior_syz = interior_stress[:,1]
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sxz_err = np.log10(np.abs(interior_sxz - analytical_sxz))
syz_err = np.log10(np.abs(interior_syz - analytical_syz))
report.keys()
report['p']
report['qbx_integration_error']
report['exp_centers'][:, 1]
plt.plot(xs, sxz_err, 'bo')
plt.plot(xs, report['exp_centers'][:, 0] * 1000 - 10)
plt.plot(xs, (report['exp_centers'][:, 1] - 1) * 1000 - 10)
plt.show()
from tectosaur2.laplace2d import Hypersingular
ys = np.linspace(1.0, 1.00005, 2)
xs = [0.0] * ys.shape[0]
obs_pts = np.array([xs, ys]).T.copy()
sing = np.array([(0,-1), (0, 1)])
stress_mat, report = integrate_term(Hypersingular(d_qbx=0), obs_pts, fault, safety_mode=True, singularities=sing, return_report=True)
interior_stress = stress_mat[:,:,:,0].sum(axis=2)
analytical_sxz, analytical_syz = analytical_stress(obs_pts[:,0], obs_pts[:,1])
interior_sxz = interior_stress[:,0]
interior_syz = interior_stress[:,1]
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sxz_err = np.log10(np.abs(interior_sxz - analytical_sxz))
syz_err = np.log10(np.abs(interior_syz - analytical_syz))
plt.plot(ys, sxz_err, 'bo')
plt.show()
# plt.plot(ys, report['exp_rs'])
# plt.show()
correct = analytical_sxz[1]
tct_val = interior_sxz[1]
report['nearfield_n_subsets']
report['nearfield_integration_error']
interior_sxz[1]
from tectosaur2.laplace2d import Hypersingular
op = obs_pts[1:]
# op = report['exp_centers'][50:51]
kernel = Hypersingular()
def integrand(srcy):
src_normals = np.zeros((srcy.shape[0], 2))
src_normals[:,0] = -1
entry = kernel.kernel(op, np.array([0*srcy, srcy]).T.copy(), src_normals)
return entry[0,0,:,0]
grule = gauss_rule(8)
def simple_quad(domain):
xs = domain[0] + (domain[1] - domain[0]) * ((grule[0] + 1) * 0.5)
ws = (domain[1] - domain[0]) * 0.5 * grule[1]
return np.sum(integrand(xs) * ws)
def recursive_quad(domain, tol, base_value=None):
if base_value is None:
base_value = simple_quad(domain)
center = (domain[0] + domain[1]) * 0.5
ldom = [domain[0], center]
lval = simple_quad(ldom)
rdom = [center, domain[1]]
rval = simple_quad(rdom)
better_value = lval + rval
err = np.abs(better_value - base_value)
if err < tol:
return better_value, 2, err
else:
left = recursive_quad(ldom, tol, lval)
right = recursive_quad(rdom, tol, rval)
return left[0] + right[0], left[1] + right[1], left[2] + right[2]
val, n_integrals, err = recursive_quad([-1, 1], 1e-12)
val, val - correct, val - tct_val, n_integrals, err
import quadpy
kronrod_n = 10
kronrod_rule = quadpy.c1.gauss_kronrod(kronrod_n)
kronrod_qx = kronrod_rule.points
kronrod_qw = kronrod_rule.weights
gauss_r = quadpy.c1.gauss_legendre(kronrod_n)
gauss_qx = gauss_r.points
kronrod_qw_gauss = gauss_r.weights
import heapq
def gk_quad(domain):
gxs = domain[0] + (domain[1] - domain[0]) * ((gauss_qx + 1) * 0.5)
gws = (domain[1] - domain[0]) * 0.5 * kronrod_qw_gauss
kxs = domain[0] + (domain[1] - domain[0]) * ((kronrod_qx + 1) * 0.5)
kws = (domain[1] - domain[0]) * 0.5 * kronrod_qw
est1 = np.sum(integrand(gxs) * gws)
est2 = np.sum(integrand(kxs) * kws)
return est1, est2, np.abs(est2 - est1)
def priority_quad(tol):
low_est, est, err = gk_quad([-1, 1])
queue = []
heapq.heappush(queue, (-err, est, -1, 1))
for i in range(1000):
cur_integral = heapq.heappop(queue)
midpt = (cur_integral[2] + cur_integral[3]) * 0.5
left = gk_quad([cur_integral[2], midpt])
right = gk_quad([midpt, cur_integral[3]])
err += cur_integral[0] + left[2] + right[2]
est += -cur_integral[1] + left[1] + right[1]
heapq.heappush(queue, (-left[2], left[1], cur_integral[2], midpt))
heapq.heappush(queue, (-right[2], right[1], midpt, cur_integral[3]))
if err < tol:
break
return est, i, err
val, n_integrals, err = priority_quad(1e-12)
val, val - correct, val - tct_val, n_integrals, err
def kahan_update(kv, a):
    # Kahan (compensated) summation step: kv[0] holds the running sum,
    # kv[1] holds the running compensation for lost low-order bits.
    y = a - kv[1]
    t = kv[0] + y
    kv[1] = (t - kv[0]) - y
    kv[0] = t
def exact_quad(domain):
rp = op[:, 0] ** 2 + (op[:, 1] - domain[0]) ** 2
ri = op[:, 0] ** 2 + (op[:, 1] - domain[1]) ** 2
sxz = -(1.0 / (2 * np.pi)) * (
((op[:, 1] - domain[0]) / rp) - ((op[:, 1] - domain[1]) / ri)
)
return sxz, sxz, 0
def kahan_priority_quad(tol, exact=False, kahan=False):
quad_fnc = exact_quad if exact else gk_quad
low_est, est_, err_ = quad_fnc([-1, 1])
est = [est_, 0]
err = [err_, 0]
queue = []
heapq.heappush(queue, (-err[0], est[0], -1, 1))
for i in range(1000):
cur_integral = heapq.heappop(queue)
midpt = (cur_integral[2] + cur_integral[3]) * 0.5
left = quad_fnc([cur_integral[2], midpt])
right = quad_fnc([midpt, cur_integral[3]])
if kahan:
kahan_update(err, cur_integral[0])
kahan_update(err, left[2])
kahan_update(err, right[2])
kahan_update(est, -cur_integral[1])
kahan_update(est, left[1])
kahan_update(est, right[1])
else:
err[0] += cur_integral[0] + left[2] + right[2]
est[0] += -cur_integral[1] + left[1] + right[1]
heapq.heappush(queue, (-left[2], left[1], cur_integral[2], midpt))
heapq.heappush(queue, (-right[2], right[1], midpt, cur_integral[3]))
if err[0] < tol:
break
return est[0] + est[1], i, err[0] + err[1]
val, n_integrals, err = kahan_priority_quad(1e-13, exact=True, kahan=False)
val, val - correct, val - tct_val, n_integrals, err
###Output
_____no_output_____ |
setbench/setbench/microbench_experiments/tutorial/tutorial.ipynb | ###Markdown
Data framework: the basic paradigm

The user implements one function `define_experiment`, then runs `../../tools/data_framework/run_experiment.py`. It runs potentially many experimental trials (over all defined configurations), captures output, builds a sqlite database, queries it, produces plots, and produces html pages to display plots...

The data framework also provides lots of tools to do querying, plot generation and analysis in jupyter notebooks (see `instructions_data.ipynb`).

None of this is specific to setbench! It is easy to apply to other code bases, as well. (data_framework is self contained--no dependencies on setbench.)

The following tutorial fully explains the derivation of several non-trivial `define_experiment()` functions.

Run the following code cell before any others

It does basic initialization for this notebook.
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
print("Initialized.")
###Output
_____no_output_____
###Markdown
The 'hello world' of `run_experiment.sh`

Defining a trivial experiment that compiles and runs a single command once and saves the output.

We do `run_in_jupyter` and pass `define_experiment`. We could alternatively save `define_experiment` in a python file and run the equivalent `run_experiments.sh` command (described in comments)...
###Code
from _basic_functions import *
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench') ## working dir for compiling
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin') ## working dir for running
set_cmd_compile (exp_dict, 'make brown_ext_abtree_lf.debra')
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./brown_ext_abtree_lf.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-cr')
# if the define_experiment() function above were saved in a file myexp.py,
# then the run_in_jupyter line above is equivalent to running shell command:
# ../../tools/data_framework/run_experiment.py myexp.py -cr
#
# NOTE: -c causes COMPILATION to occur, and -r causes experiments to be RUN
###Output
_____no_output_____
###Markdown
Try the same thing from the command line!
- create a file called `myexp.py` in this directory.
- start it with `from _basic_functions import *`
- copy the `define_experiment` function above into `myexp.py`
- run `../../tools/data_framework/run_experiment.py myexp.py -cr` in the shell (starting from this directory)

If you get an error along the lines of `NameError: name 'set_dir_compile' is not defined`, then you probably forgot to start the file with `from _basic_functions import *`, which is needed in any file where you define a `define_experiment` function for use with `run_experiment.py`.

(Re)running results without compiling

You can rerun experiments without compiling by omitting `-c`.
###Code
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench') ## working dir for compiling
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin') ## working dir for running
set_cmd_compile (exp_dict, 'make brown_ext_abtree_lf.debra')
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./brown_ext_abtree_lf.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-r')
# equiv cmd: [...]/run_experiment.py myexp.py -r
###Output
_____no_output_____
###Markdown
Data files (captured stdout/err)

Every time the data_framework runs your "run command" (provided by `set_cmd_run`), the output is automatically saved in a `data file`.

This is the output of that one run we executed.
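As a quick sanity check (a sketch, assuming the default `data/` output directory used throughout this tutorial), you can list the data files produced so far:

```python
# Sketch: list the captured data files (data000001.txt, data000002.txt, ...)
import glob
print(sorted(glob.glob('data/data*.txt')))
```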
###Code
print(shell_to_str('cat data/data000001.txt'))
###Output
_____no_output_____
###Markdown
Running with varying `run param`eters

Of course running one command isn't very interesting... you could do that yourself. Instead, we want to run the command many times, with different arguments. To this end, we allow the user to specify `run param`s. The idea is as follows:
- call `add_run_param` to make the data framework aware of parameters that you want your experiments to be run with.
- your program will be run once for each set of values in the CROSS PRODUCT of all parameters.
- (i.e., we will run your program with every combination of parameters)

Replacement strings / tokens

You can use any of the run params you define to dynamically replace `{_tokens_like_this}` in the run command. For example, we include `{DS_TYPENAME}` in our run command, and it will be replaced by the current value of `{DS_TYPENAME}`. (That's right, we can run different commands based on the current value of `DS_TYPENAME`.)

You can also get the paths to key directories by using:
- `{__dir_compile}`
- `{__dir_run}`
- `{__dir_data}`

The following replacement token is also defined for you:
- `{__step}` the number of runs done so far, padded to six digits with leading zeros

*Note:* we now need to compile ALL of the binaries we want to *run*. So, we just change our make command to compile everything...
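To illustrate the cross-product + token-replacement idea only (this is *not* the framework's internal implementation, just a sketch using parameters from this tutorial):

```python
# Illustration only: how a cross product over run params could be enumerated,
# and how {tokens} could be substituted into a command template.
import itertools

run_params = {
    '__trials'   : [1, 2, 3],
    'DS_TYPENAME': ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'],
}
cmd_template = './{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000'

names = list(run_params.keys())
for values in itertools.product(*(run_params[n] for n in names)):
    config = dict(zip(names, values))        # one combination of run params
    cmd = cmd_template.format(**config)      # {tokens} replaced with current values
    print(config, '->', cmd)
```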
###Code
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6') ## -j specifies how many threads to compile with
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-cr')
###Output
_____no_output_____
###Markdown
Extracting data fields from captured stdout/err

NOW we're going to EXTRACT data automatically from the generated data file(s). To do this, we must include the argument `-d`, which stands for `database creation`.

Note 3 data files were produced this time: one for each value of `DS_TYPENAME`. Let's put those data files to use by specifying that we want to *extract* some text from each data file. In particular, let's extract a line of the form "`DS_TYPENAME=...`" and a line of the form "`total_throughput=...`" from each data file. (You can find such lines in the data file above if you like.)

Extracted data is stored in a sqlite database `data/output_database.sqlite` in a table called `data`. (Each field name passed to `add_data_field` becomes a **column** in `data`.)

To specify a column to be extracted, we call `add_data_field()`. We do this for `total_throughput`, but note that we do *not* have to do this for `DS_TYPENAME`, as it was already added as a `run param`.

Whenever you add a data field, you should choose a column type `coltype` from:
- `'TEXT'`
- `'INTEGER'`
- `'REAL'`

The `default` if you do not specify is `'TEXT'`. Note, however, that allowing the default `'TEXT'` option for a `numeric` field can cause problems when it is time to produce **graphs/plots**!
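Since the extracted data lands in an ordinary sqlite file, you can also peek at it outside the framework. A minimal sketch using Python's standard `sqlite3` module (assuming the database path stated above, and that you have already run with `-d`):

```python
# Sketch: inspect the generated database directly with the sqlite3 standard library.
# Assumes data/output_database.sqlite already exists (i.e., you ran with -d).
import sqlite3

con = sqlite3.connect('data/output_database.sqlite')
cur = con.execute('select DS_TYPENAME, total_throughput from data')
for row in cur.fetchall():
    print(row)
con.close()
```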
###Code
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER')
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rd')
###Output
_____no_output_____
###Markdown
Querying the database

Note that we can simply **access** the last database we created, *WITHOUT rerunning* any experiments, by omitting all command line args in our `run_in_jupyter` call.

Also note that you can accomplish the same thing from the **command line** by running `../../tools/data_framework/run_experiment.py myexp.py` with `cmdline_args` omitted. However, since you can't pass your `define_experiment` function as a command line argument, you have to save it in a `.py` file and pass the name `myexp.py` of that file as the first argument to `run_experiment.py`.

To query the database, we can use function `select_to_dataframe(sql_string)` with a suitable `sql_string`. There are many other powerful functions included for querying and plotting data, but those are covered in `microbench_experiments/example/instructions_data.ipynb`. In **this** notebook we are focusing on the design of the `define_experiment` function.

Extra columns

Note that the resulting query shows numerous extra columns such as `__hostname`, `__step` and `__cmd_run`, that we did *not* add ourselves. These are added *automatically* by the data framework.
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='')
df = select_to_dataframe('select * from data')
df
# run_in_jupyter call above has equivalent command:
# [...]/run_experiment.py myexp.py
###Output
_____no_output_____
###Markdown
Suppressing logging output in `run_in_jupyter`

If you want to call `run_in_jupyter` as above *without* seeing the `logging data` that was copied to stdout, you can disable the log output by calling `disable_tee_stdout()`. Note that logs will still be collected, but the output will **only** go to the log file `output_log.txt`.
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
disable_tee_stdout()
run_in_jupyter(define_experiment, cmdline_args='')
df = select_to_dataframe('select * from data')
enable_tee_stdout() ## remember to enable, or you won't get output where you DO expect it...
df
###Output
_____no_output_____
###Markdown
Running multiple trials

If you want to perform repeated trials of each experimental configuration, add a run_param called "`__trials`", and specify a list of trial numbers (as below).

(The run_param doesn't *need* to be called `__trials` exactly, but if it is called `__trials` exactly, then extra sanity checks will be performed to verify, for example, that each data point in a graphical plot represents the average of precisely as many experimental runs as there are entries in the `__trials` list.)
###Code
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2, 3])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER')
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rd')
###Output
_____no_output_____
###Markdown
Querying the data (to see the multiple trials)
###Code
select_to_dataframe('select * from data')
###Output
_____no_output_____
###Markdown
Extractors: mining data from arbitrary text

By default, when you call `add_data_field(exp_dict, 'XYZ')`, a field `'XYZ'` will be fetched from each data file using extractor `grep_line()`, which greps (searches) for a line of the form `'XYZ={arbitrary string}\n'`.

*If a field you want to extract is not stored that way in the output data*, then you can specify a custom `extractor` function, as we do in our example with `get_maxres()` below, to extract the max resident size from the 6th space-separated column of the output of the linux "time" command.

Also note: each field added with `add_data_field` becomes a replacement token (e.g., `{DS_TYPENAME}`) that can be referenced in any plot titles, axis titles, field lists, etc. (which we will see more on below).

The following special fields are also defined for you (and added to the `data` table):
- `{__step}` the number of runs done so far, padded to six digits with leading zeros
- `{__cmd_run}` your cmd_run string with any tokens replaced appropriately for this run
- `{__file_data}` the output filename for the current run's data
- `{__path_data}` the relative path to the output file for the current run's data
- `{__hostname}` the result of running the hostname command on the machine
- `{__id}` a unique row ID

Note: in the following, `defaults` are `validator=is_nonempty` and `extractor=grep_line`.

Text output we are *trying* to extract max resident size from

A line of the form:
`960.43user 50.70system 0:06.14elapsed 16449%CPU (0avgtext+0avgdata 3034764maxresident)k`

From this, we would like to extract `3034764`, then convert from KB to MB...

Extractor that accomplishes this

`input`: an `extractor` function takes, as its arguments: the same `exp_dict` argument as `define_experiment()`, a `file_name` to load data from, and a `field_name` to extract.

`processing`: it should fetch the appropriate contents for that field, from the given `file_name`, and return them.

`output`: return type can be a `string`, `int` or `float`.

(In cases like this, where we're writing a custom `extractor` to fetch a specific field, the `field_name` argument ends up being irrelevant.)

You are free to read the contents of the file, and process the data you see however you like, to come up with the desired return value.

In our case, we will use the `shell_to_str()` utility function provided by the data framework to run a sequence of `bash` shell commands to extract the desired string from the file, then cast it to a `float` and convert it from kilobytes to megabytes. (You could just as easily do this with pure python code. The choice is yours.)
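As a sketch of the "pure python" alternative mentioned above (not part of the tutorial's code; just an illustration that should produce the same value as the shell-based `get_maxres` in the next cell):

```python
# Sketch: a pure-python extractor equivalent to the shell-based get_maxres below.
# It follows the (exp_dict, file_name, field_name) argument convention described above.
def get_maxres_pure_python(exp_dict, file_name, field_name):
    with open(file_name, 'r') as f:
        for line in f:
            if 'maxres' in line:
                # e.g. "... (0avgtext+0avgdata 3034764maxresident)k" -> 3034764
                maxres_kb = float(line.split(' ')[5].split('m')[0])
                return maxres_kb / 1000    # KB -> MB
    return None   # no matching line; the field's validator would then flag this run
```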
###Code
def get_maxres(exp_dict, file_name, field_name):
## manually parse the maximum resident size from the output of `time` and add it to the data file
maxres_kb_str = shell_to_str('grep "maxres" {} | cut -d" " -f6 | cut -d"m" -f1'.format(file_name))
return float(maxres_kb_str) / 1000
###Output
_____no_output_____
###Markdown
**Using** this extractor in `define_experiment`

We actually use this extractor by adding a data field and specifying it:

`add_data_field (exp_dict, 'maxresident_mb', extractor=get_maxres)`
###Code
def get_maxres(exp_dict, file_name, field_name):
## manually parse the maximum resident size from the output of `time` and add it to the data file
maxres_kb_str = shell_to_str('grep "maxres" {} | cut -d" " -f6 | cut -d"m" -f1'.format(file_name))
return float(maxres_kb_str) / 1000
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2, 3])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER')
add_data_field (exp_dict, 'maxresident_mb', coltype='REAL', extractor=get_maxres)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rd')
###Output
_____no_output_____
###Markdown
Viewing the resulting data

Note the `maxresident_mb` column -- highlighted for emphasis using Pandas DataFrame `style.applymap()`.
###Code
df = select_to_dataframe('select * from data')
df.style.applymap(lambda s: 'background-color: #b63f3f', subset=pd.IndexSlice[:, ['maxresident_mb']])
###Output
_____no_output_____
###Markdown
Validators: *checking* extracted data

Suppose you want to run some basic *sanity checks* on fields you pull from data files. A `validator` function is a great way of having the data framework perform a basic check on values as they are extracted from data files.

Pre-existing `validator` functions:
- `is_positive`
- `is_nonempty`
- `is_equal(to_value)`

For example, suppose we want to verify that `total_throughput` and `maxresident_mb` are both **positive** numbers. To do this, we specify `validator=is_positive` for each, below.

Note: you can write your own `validator` by mimicking the ones in `../../tools/data_framework/_basic_functions.py`. (See `is_positive` and `is_equal`.)
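For illustration only, here is a rough sketch of what a custom validator in the style of `is_positive`/`is_equal` *might* look like. The exact signature the framework expects should be checked against `_basic_functions.py`; the assumption here is that a validator simply receives the extracted value and returns whether it is acceptable:

```python
# Hypothetical custom validator in the spirit of is_equal(to_value).
# Assumes a validator receives the extracted value and returns a boolean;
# verify against the real ones in _basic_functions.py before relying on this.
def is_in_range(lo, hi):
    def validator(value):
        return lo <= float(value) <= hi
    return validator

## usage sketch inside define_experiment:
# add_data_field(exp_dict, 'maxresident_mb', coltype='REAL',
#                extractor=get_maxres, validator=is_in_range(0, 64000))
```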
###Code
def get_maxres(exp_dict, file_name, field_name):
## manually parse the maximum resident size from the output of `time` and add it to the data file
maxres_kb_str = shell_to_str('grep "maxres" {} | cut -d" " -f6 | cut -d"m" -f1'.format(file_name))
return float(maxres_kb_str) / 1000
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2, 3])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'maxresident_mb', coltype='REAL', extractor=get_maxres, validator=is_positive)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rd')
###Output
_____no_output_____
###Markdown
What happens when a field *fails* validation?

We trigger a validation failure by specifying an obviously incorrect validator `is_equal('hello')`.
###Code
def get_maxres(exp_dict, file_name, field_name):
## manually parse the maximum resident size from the output of `time` and add it to the data file
maxres_kb_str = shell_to_str('grep "maxres" {} | cut -d" " -f6 | cut -d"m" -f1'.format(file_name))
return float(maxres_kb_str) / 1000
def define_experiment(exp_dict, args):
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2, 3])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_equal('hello'))
add_data_field (exp_dict, 'maxresident_mb', coltype='REAL', extractor=get_maxres, validator=is_positive)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rd', error_exit_code=0)
###Output
_____no_output_____
###Markdown
Plotting results (for data with 3 dimensions)

One of the main reasons I created the data framework was to make it stupid-easy to produce lots of graphs/plots. The main tool for doing this is the `add_plot_set` function.

`add_plot_set()` can be used to cause a SET of plots to be rendered as images in the data directory. The precise SET of plots is defined by the fields included in the `varying_cols_list` keyword argument. (The data framework will iterate over all distinct combinations of values in `varying_cols_list`, and will render a plot for each.) In the example below, we do *not* pass any `varying_cols_list` argument, so only a single plot is produced. (We will see where `varying_cols_list` is useful, and how it is used, in some of the later examples...)

Note: a plot's title and filename can only use replacement `{tokens}` that correspond to fields THAT ARE INCLUDED in `varying_cols_list[]`. (This is because only those tokens are well defined and unique PER PLOT.)

Note: any plots you define are *not actually rendered* unless you add command line argument `-p`.
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## tools library for plotting
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput.png'
, title='Throughput vs data structure'
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type='bars', plot_cmd_args = '--legend-include'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdp')
###Output
_____no_output_____
###Markdown
Let's view the data and plot produced by the previous cell

(You have to run the previous cell before running the next one.)
###Code
from IPython.display import Image
display(Image('data/throughput.png'))
display(select_to_dataframe('select * from data'))
###Output
_____no_output_____
###Markdown
Plotting data with a custom function

If you want full control over how your data is plotted, you can specify your own function as the `plot_type` argument.

Your custom function will be called with keyword arguments:
- `filename` -- the output filename for the plot image
- `column_filters` -- the *current* values of all fields in `varying_cols_list` (if any)
- `data` -- a Pandas DataFrame containing the (filtered) data for this plot
- `series_name` -- name of the column containing `series` in `data` (`''` if no series)
- `x_name` -- name of the column containing `x-values` in `data`
- `y_name` -- name of the column containing `y-values` in `data`
- `exp_dict` -- same as `exp_dict` passed to `define_experiment`

To *better understand* what data is passed to a custom function, let's create a custom function that just prints its arguments.
###Code
def my_plot_func(filename, column_filters, data, series_name, x_name, y_name, exp_dict=None):
print('## filename: {}'.format(filename))
print('## filters: {}'.format(column_filters))
print('## data:')
print(data)
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput.png'
, title='Throughput vs data structure'
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type=my_plot_func
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
disable_tee_stdout() ## disable regular log printing so we ONLY see OUR prints below
run_in_jupyter(define_experiment, cmdline_args='-dp')
enable_tee_stdout()
###Output
_____no_output_____
###Markdown
For example, we can plot this data *manually* using `Pandas`

Since we have `TWO trials` per combination of `DS_TYPENAME` and `TOTAL_THREADS`, we need to aggregate our data somehow before plotting. We can use the `pandas` `pivot_table()` function to compute the `mean` of the trials for each data point.

Once we have a pivot table, we can call `pandas` `plot()` to render it, then use `savefig()` to save it to the provided `filename`.

Of course, you can write your own such functions, and make them arbitrarily complex/customized...
###Code
import pandas
import matplotlib as mpl
def my_plot_func(filename, column_filters, data, series_name, x_name, y_name, exp_dict=None):
table = pandas.pivot_table(data, index=x_name, columns=series_name, values=y_name, aggfunc='mean')
table.plot(kind='line')
mpl.pyplot.savefig(filename)
print('## SAVED FIGURE {}'.format(filename))
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 5 5 -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput.png'
, title='Throughput vs data structure'
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type=my_plot_func
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
disable_tee_stdout()
run_in_jupyter(define_experiment, cmdline_args='-dp')
enable_tee_stdout()
###Output
_____no_output_____
###Markdown
Viewing the generated figure
###Code
from IPython.display import Image
display(Image('data/throughput.png'))
###Output
_____no_output_____
###Markdown
Producing *many* plots (for data with 5 dimensions)the real power of `add_plot_set` only starts to show once you want to plot *many* plots at once.so, let's add a couple of dimensions to our data:- key range (`MAXKEY` in the data file)- update rate (`INS_DEL_FRAC` in the data file)and use them to produce **multiple plots** (one for each combination of values of these dimensions). we do this by specifying `varying_cols_list` in `add_plot_set`.we can also customize the plot file`name`s and `title`s with these parameters. Showing these plots in a table in an HTML pagewe also generate an HTML page to show off these grids in a table by invoking `add_page_set`.HTML page construction only occurs if you specify command line argument `-w` (which stands for `website creation`) to `run_experiment.py`. so, we add this to `run_in_jupyter`.note: you can also customize the `index.html` starting page (which is blank by default) by providing your own `HTML body` string to the function `set_content_index_html(exp_dict, content_html_string)`.
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput-{INS_DEL_FRAC}-{MAXKEY}k.png'
, title='{INS_DEL_FRAC} {MAXKEY}k: throughput'
, varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type='bars'
)
## render one legend for all plots (since the legend is the same for all).
## if legend varies from plot to plot, you might enable legends for all plots,
## or write a custom plotting command that determines what to do, given your data
add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## we place the above legend at the bottom of *each* table by providing "legend_file"
add_page_set(
exp_dict
, image_files='throughput-{INS_DEL_FRAC}-{MAXKEY}k.png'
, name='throughput'
, column_field='INS_DEL_FRAC'
, row_field='MAXKEY'
, legend_file='throughput-legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdpw')
###Output
_____no_output_____
###Markdown
Let's view the plots produced by the previous cellnote you can click on the plots to "drill down" into the data.
###Code
show_html('data/throughput.html')
###Output
_____no_output_____
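###Markdown
As mentioned above, the otherwise-blank `index.html` starting page can be customized with `set_content_index_html(exp_dict, content_html_string)`. Here is a minimal sketch of how that call might look inside `define_experiment` (the HTML string below is purely illustrative, not part of the original tutorial):
###Code
def define_experiment(exp_dict, args):
    ## ... same configuration calls as in the previous experiment ...
    ## provide a custom HTML body for the starting page index.html
    ## (the content string here is an illustrative placeholder)
    set_content_index_html(exp_dict, '<h1>Throughput experiments</h1><p>See the throughput page set for plots.</p>')
###Output
_____no_output_____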
###Markdown
How about 4 dimensions?We just saw how to plot 3- and 5-dimensional data...Let's remove the `MAXKEY` column / data dimension to reduce the dimensionality of the data to 4.With only one column in the `varying_cols_list` and NO `row_field` specified in `add_page_set`, there will only be one row of plots. (So a strip of plots instead of a grid.)
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k 200000 -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput-{INS_DEL_FRAC}.png'
, title='{INS_DEL_FRAC}: throughput'
, varying_cols_list=['INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type='bars'
)
## render one legend for all plots (since the legend is the same for all).
## if legend varies from plot to plot, you might enable legends for all plots,
## or write a custom plotting command that determines what to do, given your data
add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## we place the above legend at the bottom of *each* table by providing "legend_file"
add_page_set(
exp_dict
, image_files='throughput-{INS_DEL_FRAC}.png'
, name='throughput'
, column_field='INS_DEL_FRAC'
, legend_file='throughput-legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdpw')
###Output
_____no_output_____
###Markdown
Let's view the plots produced by the previous cell
###Code
show_html('data/throughput.html')
###Output
_____no_output_____
###Markdown
Plots and HTML for data with 6 dimensionsnote that we could have added more than 2 dimensions of data (resulting in data with 6+ dimensions), listing potentially many fields in `varying_cols_list`, and this simply would have resulted in *more plots*.note that if we had **one** more dimension of data (6 dimensions in total), it could be listed in the keyword argument `table_field`, and **multiple** HTML tables would be rendered in a single HTML page (one for each value of this column).
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')
add_run_param (exp_dict, '__trials', [1])
add_run_param (exp_dict, 'TOTAL_THREADS', [2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ['0.0 0.0', '5.0 5.0'])
## unlike the above four fields,
## the run command does NOT produce a line of the form 'malloc=[...]'.
## so, run_experiment.py will APPEND a line of this form to the datafile!
add_run_param (exp_dict, 'malloc', ['jemalloc', 'mimalloc'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/lib{malloc}.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'malloc', validator=is_run_param('malloc'))
add_plot_set(
exp_dict
, name='throughput-{malloc}-{INS_DEL_FRAC}-{MAXKEY}.png'
, title='{malloc} {INS_DEL_FRAC} {MAXKEY}'
, varying_cols_list=['malloc', 'MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type='bars'
)
## render one legend for all plots (since the legend is the same for all).
## if legend varies from plot to plot, you might enable legends for all plots,
## or write a custom plotting command that determines what to do, given your data
add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## note: choice of column / row / table field determines how the HTML page looks -- up to you!
add_page_set(
exp_dict
, image_files='throughput-{malloc}-{INS_DEL_FRAC}-{MAXKEY}.png'
, name='throughput'
, column_field='INS_DEL_FRAC'
, row_field='MAXKEY'
, table_field='malloc'
, legend_file='throughput-legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdpw')
###Output
_____no_output_____
###Markdown
Let's view the data, plots and HTML we produced
###Code
show_html('data/throughput.html')
display(select_to_dataframe('select * from data'))
###Output
_____no_output_____
###Markdown
Plots and HTML for data with 7+ dimensionsif we had MORE than one extra dimension of data (7+ dimensions in total), we could list additional fields in the keyword argument `page_field_list`, which would cause additional HTML pages to be rendered (one for each combination of values for fields in `page_field_list`), and linked together by an `index.htm`. (note that the `name` keyword argument of `page_field_list` must also be modified to reference these fields, in order for multiple HTML files to be created---you must specify what sort of naming convention you'd like the framework to use.)
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')
add_run_param (exp_dict, '__trials', [1])
add_run_param (exp_dict, 'TOTAL_THREADS', [2, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ['0.0 0.0', '5.0 5.0'])
## unlike the above four fields,
## the run command does NOT produce a line of the form 'malloc=[...]'.
## so, run_experiment.py will APPEND a line of this form to the datafile!
add_run_param (exp_dict, 'malloc', ['jemalloc', 'mimalloc'])
## ditto for reclaimer
add_run_param (exp_dict, 'numactl', ['', 'numactl --interleave=all'])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/lib{malloc}.so {numactl} time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput-{malloc}-{numactl}-{INS_DEL_FRAC}-{MAXKEY}.png'
, title='{INS_DEL_FRAC} {MAXKEY}'
, varying_cols_list=['malloc', 'numactl', 'MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type='bars'
)
## render one legend for all plots (since the legend is the same for all).
## if legend varies from plot to plot, you might enable legends for all plots,
## or write a custom plotting command that determines what to do, given your data
add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## we place the above legend at the bottom of *each* table by providing "legend_file"
add_page_set(
exp_dict
, image_files='throughput-{malloc}-{numactl}-{INS_DEL_FRAC}-{MAXKEY}.png'
, name='throughput'
, column_field='numactl'
, row_field='malloc'
, table_field='MAXKEY'
, page_field_list=['INS_DEL_FRAC']
, legend_file='throughput-legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdpw')
###Output
_____no_output_____
###Markdown
Let's view the data, plots and HTML we produced
###Code
show_html('data/index.html')
display(select_to_dataframe('select * from data'))
###Output
_____no_output_____
###Markdown
It's easy to plot *many* value fields vs your `run_params`Let's go back to our 5-dimensional data example to demonstrate how to easily produce plots from *many different value fields* (not just `total_throughput`). First let's run a quick shell command to check what kinds of fields exist in our data(This command uses `grep` with a simple `regex` to look for lines of the form "XYZ=*number*")
###Code
shell_to_list('grep -E "^[^ =]+=[0-9.]+$" data/data000001.txt', sep='\n')
###Output
_____no_output_____
###Markdown
Let's focus on the following fields from that list:- `tree_stats_numNodes`- `tree_stats_height`- `tree_stats_avgKeyDepth`- `global_epoch_counter`- `PAPI_L2_TCM`- `PAPI_L3_TCM`- `PAPI_TOT_CYC`- `PAPI_TOT_INS`- `total_throughput`
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')
add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')
add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')
## render one legend for all plots (since the legend is the same for all).
## if legend varies from plot to plot, you might enable legends for all plots,
## or write a custom plotting command that determines what to do, given your data
add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## render a plot_set for EVERY numeric data field extracted above
for field in get_numeric_data_fields(exp_dict):
add_plot_set(
exp_dict
, name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'
, title='{INS_DEL_FRAC} {MAXKEY}k: '+field
, varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis=field
, plot_type='bars'
)
## and also add a page_set for each data field.
## we place the above legend at the bottom of *each* table by providing "legend_file"
add_page_set(
exp_dict
, image_files=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'
, name=field
, column_field='INS_DEL_FRAC'
, row_field='MAXKEY'
, legend_file='legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdpw')
###Output
_____no_output_____
###Markdown
Viewing the results
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
show_html('data/index.html')
###Output
_____no_output_____
###Markdown
Rendering *many data fields* on a *single* HTML pagein the previous example, we built one page for each data field extracted. however, you might want, for example, to build a single page with many data fields, each appearing as a *row* of plots.if you take a moment to think about *how* you would accomplish this using `add_page_set`, it's not obvious that you even *can*... you can specify *one field* as the `row_field`, but in this case we want to show *many different fields, one per row*. the trick, used in the next cell, is that `row_field` also accepts a *list* of field names; the `image_files` pattern then references the special `{row_field}` placeholder so that each listed field gets its own row of plots.
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')
add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')
add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')
## render one legend for all plots
add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## render plots
value_fields = get_numeric_data_fields(exp_dict)
for field in value_fields:
add_plot_set(
exp_dict
, name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'
, title='{INS_DEL_FRAC} {MAXKEY}k: '+field
, varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis=field
, plot_type='bars'
)
## and also add a page_set to show all plots
add_page_set(
exp_dict
, image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'
, name='comparison'
, column_field='INS_DEL_FRAC'
, row_field=value_fields
, table_field='MAXKEY'
, legend_file='legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-dpw')
###Output
_____no_output_____
###Markdown
Viewing the results
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
show_html('data/index.html')
###Output
_____no_output_____
###Markdown
Separating `tables` into different `pages`if you prefer, you can eliminate the `table_field` argument to `add_page_set` and instead use `page_field_list`. this produces a slightly different effect: instead of rendering multiple tables (one per `MAXKEY` value) on a single HTML page, each `MAXKEY` value now gets its own HTML page, linked from the starting index page.
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')
add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')
add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')
## render one legend for all plots
add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## render plots
value_fields = get_numeric_data_fields(exp_dict)
for field in value_fields:
add_plot_set(
exp_dict
, name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'
, title='{INS_DEL_FRAC} {MAXKEY}k: '+field
, varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis=field
, plot_type='bars'
)
## and also add a page_set to show all plots
add_page_set(
exp_dict
, image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'
, name='comparison'
, column_field='INS_DEL_FRAC'
, row_field=value_fields
, page_field_list=['MAXKEY']
, legend_file='legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-dpw')
###Output
_____no_output_____
###Markdown
Viewing the results
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
show_html('data/index.html')
###Output
_____no_output_____
###Markdown
Defining a `--testing` mode Briefly running each configuration *BEFORE* doing a full runi often find it useful to have a `testing` mode (enabled with argument `--testing`), that runs for less time, but still explores all (important) configurations of run parameters, to make sure nothing simple will fail when i run for many hours. (fail-fast is good!)to this end, a variable called `args.testing` is accessible in `define_experiment`, and if it's `True`, then the user has passed `--testing` as a command line arg.the correct response to this is to limit the set of configurations somehow, perhaps by reducing the number of thread counts, and/or reducing the length of time to execute in each trial, and/or limiting runs to a single trial, and/or eliminating data structure prefilling (or anything else that you find appropriate).for example, let's add a simple `--testing` mode to the previous code cell.note the `if args.testing:` block, as well as the `--testing` argument passed to `run_in_jupyter` *instead of* the previous `` argument. (we also add back the `-r` argument, since we want to actually run our testing mode.)observe that this new `--testing` mode takes around 20 seconds to run, compared to several minutes without specifying `--testing`. (this time difference becomes much more drastic if you would normally run more trials, thread counts, or for longer than 1 second. :)) i make it a habit to run in `--testing` mode and take a quick peek at the results before running my full experiments.
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
millis_to_run = 1000
## defined a reduced set of configurations for testing mode
if args.testing:
add_run_param (exp_dict, '__trials', [1])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 8])
millis_to_run = 100
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t ' + str(millis_to_run))
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')
add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')
add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')
## render one legend for all plots
add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## render plots
value_fields = get_numeric_data_fields(exp_dict)
for field in value_fields:
add_plot_set(
exp_dict
, name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'
, title='{INS_DEL_FRAC} {MAXKEY}k: '+field
, varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis=field
, plot_type='bars'
)
## and also add a page_set to show all plots
add_page_set(
exp_dict
, image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'
, name='comparison'
, column_field='INS_DEL_FRAC'
, row_field=value_fields
, page_field_list=['MAXKEY']
, legend_file='legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='--testing -rdpw')
###Output
_____no_output_____
###Markdown
Viewing the `--testing` mode results
###Code
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
show_html('data/index.html')
###Output
_____no_output_____
###Markdown
Custom output filename patternsin the experiments above, we have always used the default filename for output files: `dataXXXXXX.txt`.if you want a different file naming scheme, it's easy to specify a pattern for this using `set_file_data(exp_dict, pattern)`.let's see an example of this, where we include the current values of several `run_param`s in the output file pattern.(you can also set the output directory with `set_dir_data(exp_dict, path)`; a brief sketch of that appears after the next cell.)
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
set_file_data (exp_dict, 'my_data_n{TOTAL_THREADS}_k{MAXKEY}_insdel{INS_DEL_FRAC}_{DS_TYPENAME}.txt')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='--testing -rdpw')
###Output
_____no_output_____
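###Markdown
Similarly, the output *directory* (rather than the filename pattern) can be changed with `set_dir_data(exp_dict, path)`, mentioned above. A minimal sketch follows; the directory name is illustrative:
###Code
def define_experiment(exp_dict, args):
    ## ... same configuration calls as in the previous cell ...
    ## write output data files under ./my_output instead of the default data directory
    set_dir_data    (exp_dict, os.getcwd() + '/my_output')
    set_file_data   (exp_dict, 'my_data_n{TOTAL_THREADS}_k{MAXKEY}_insdel{INS_DEL_FRAC}_{DS_TYPENAME}.txt')
###Output
_____no_output_____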
###Markdown
Automatic best-effort sanity checksthe data framework does its best to identify some basic mistakes that are common when running repeated experiments over a large configuration space. we describe some of them here, and show how they work.for example, observe that the following `define_experiment` function attempts to plot `TOTAL_THREADS` on the x-axis, `total_throughput` on the y-axis, with `DS_TYPENAME` as the series, but completely ignores `MAXKEY` in the `add_plot_set` call.this is a mistake, as this would result in `averaging` unrelated data points with two *different* values of `MAXKEY`.run the following code cell to see the detailed error message that results in this situation. it attempts to be as helpful as possible in helping you diagnose the cause. in this case it essentially identifies and highlights the problematic column (`MAXKEY`) *for you*, and suggests a fix (adding it to the `varying_cols_list` argument when calling `add_plot_set`).of course, just because something plots successfully doesn't mean you haven't made a mistake... but we do our best to catch a variety of simple mistakes. (or at least assert and fail-fast when *some* sensible assumptions are violated.)
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 0.5 0.5 -k {MAXKEY} -t 100')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_plot_set(
exp_dict
, name='throughput.png'
, title='throughput'
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis='total_throughput'
, plot_type='bars'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdp', error_exit_code=0)
###Output
_____no_output_____
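###Markdown
Following the suggestion in that error message, the fix is to add `MAXKEY` to `varying_cols_list` (and, following the naming pattern used earlier, to reference it in the plot `name` so each key range gets its own plot file). A sketch of the corrected call; the `name` and `title` strings are illustrative:
###Code
def define_experiment(exp_dict, args):
    ## ... same run_params and data fields as in the previous (failing) cell ...
    add_plot_set(
          exp_dict
        , name='throughput-{MAXKEY}.png'
        , title='throughput (key range {MAXKEY})'
        , varying_cols_list=['MAXKEY']
        , series='DS_TYPENAME'
        , x_axis='TOTAL_THREADS'
        , y_axis='total_throughput'
        , plot_type='bars'
    )
###Output
_____no_output_____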
###Markdown
Automatic archival features (data zip, git commit hash fetch, git diff file archive)activated with command line arg: `-z` (which stands for `zip creation`)the data framework offers a powerful convenience for archiving your experiments: it can automatically ZIP *as little data as is needed* to guarantee you won't lose the ability to return to this exact code/data state (file/directory structure).how does it do this?well, assuming you are working a git repository, and are committing changes as you go, the repository's current `commit hash` presumably gives you a way to get *pretty close* to your current file/directory structure.but of course, it will be missing any changes you've made since your last commit! this includes all of the data you've just generated, as well as any tentative code changes you've made (perhaps experimental changes you're currently testing).happily, we can *extract* the list of files you've changed *since your last commit* directly with a `git` command: `git status -s | awk '{if ($1 != "D") print $2}' | grep -v "/$"`so, we do this, and then once we have this list of files, we selectively add *them* to a ZIP file along with the data directory we just produced, as well as the file `output_log.txt`.crucially, any files that are ignored by `git` (because they are covered by a pattern in your `.gitignore` file) will *NOT* be added to the ZIP file. this means you can automatically exclude files easily that you wouldn't want in your repo anyway. (normally the `data` folder produced by your experiments would probably fall into that category, but we add it manually. if you want to add more files manually, see the function `do_finish` in `run_experiment.py`.)this whole process should make it easier to achieve a *much* smaller file size for an archive that you *can* reconstruct to reproduce experiments. this smaller file size *should* make it feasible to archive *every* set of experiments you run by default, along with enough information to understand exactly what was run, later. (and, you should only occasionally have to clean up your archives.) this can help you eliminate one of the questions we all *hate* asking: `what on earth did we run to get these results?`to help you reconstruct your current file/directory state later, we dump all relevant information about the `current commit`, including the `commit hash` to `output_log.txt` before we add it to the ZIP. you can find this information about the commit by looking for `'git status:'` or `'commit hash='` in `output_log.txt`.for example, the following code causes text along the following lines to be archived as part of `output_log.txt`: Fetching git status and any uncommitted changes for archival purposes commit_hash=05ec0e2184bd8c7a30e22457483cbeeadd0c2461 git_status: On branch data_framework Your branch is up to date with 'origin/data_framework'. Changes not staged for commit: (use "git add ..." to update what will be committed) (use "git checkout -- ..." 
to discard changes in working directory) (commit or discard the untracked or modified content in submodules) modified: .vscode/settings.json modified: microbench_experiments/tutorial/tutorial.ipynb modified: microbench_experiments/tutorial/tutorial_extra.ipynb modified: tools (new commits, modified content) no changes added to commit (use "git add" and/or "git commit -a") diff_files=['.vscode/settings.json', 'microbench_experiments/tutorial/tutorial.ipynb', 'microbench_experiments/tutorial/tutorial_extra.ipynb', 'tools'] on my system, the following code produces an archive smaller than `3MB`, which offers complete reproducibility (and even includes 37 generated plots), despite the entire contents of setbench reaching `140MB`!
###Code
def define_experiment(exp_dict, args):
set_dir_tools (exp_dict, os.getcwd() + '/../../tools')
set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')
set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')
set_cmd_compile (exp_dict, 'make -j6')
add_run_param (exp_dict, '__trials', [1, 2])
add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])
add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])
add_run_param (exp_dict, 'MAXKEY', [20000, 200000])
add_run_param (exp_dict, 'INS_DEL_FRAC', ["0.0 0.0", "5.0 5.0"])
set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')
add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)
add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')
add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')
add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')
add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')
add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')
## render one legend for all plots
add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')
## render plots
value_fields = get_numeric_data_fields(exp_dict)
for field in value_fields:
add_plot_set(
exp_dict
, name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'
, title='{INS_DEL_FRAC} {MAXKEY}k: '+field
, varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']
, series='DS_TYPENAME'
, x_axis='TOTAL_THREADS'
, y_axis=field
, plot_type='bars'
)
## and also add a page_set to show all plots
add_page_set(
exp_dict
, image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'
, name='comparison'
, column_field='INS_DEL_FRAC'
, row_field=value_fields
, table_field='MAXKEY'
, legend_file='legend.png'
)
import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *
run_in_jupyter(define_experiment, cmdline_args='-rdpwz')
###Output
_____no_output_____ |
notes/02 Pandas Mini-Project/Statistics from Stock Data.ipynb | ###Markdown
Statistics from Stock DataIn this lab we will load stock data into a Pandas Dataframe and calculate some statistics on it. We will be working with stock data from Google, Apple, and Amazon. All the stock data was downloaded from yahoo finance in CSV format. In your workspace you should have a file named GOOG.csv containing the Google stock data, a file named AAPL.csv containing the Apple stock data, and a file named AMZN.csv containing the Amazon stock data. All the files contain 7 columns of data:**Date Open High Low Close Adj_Close Volume**We will start by reading in any of the above CSV files into a DataFrame and see what the data looks like.
###Code
# We import pandas into Python
import pandas as pd
# We read in a stock data data file into a data frame and see what it looks like
df = pd.read_csv('./GOOG.csv')
# We display the first 5 rows of the DataFrame
df.head()
###Output
_____no_output_____
###Markdown
We clearly see that the DataFrame has automatically labeled the row indices using integers and has labeled the columns of the DataFrame using the names of the columns in the CSV files. To DoYou will now load the stock data from Google, Apple, and Amazon into separate DataFrames. However, for each stock you will only be interested in loading the `Date` and `Adj Close` columns into the Dataframe. In addition, you want to use the `Date` column as your row index. Finally, you want the DataFrame to recognize the dates as actual dates (year/month/day) and not as strings. For each stock, you can accomplish all these things in just one line of code by using the appropriate keywords in the `pd.read_csv()` function. Here are a few hints:* Use the `index_col` keyword to indicate which column you want to use as an index. For example `index_col = ['Open']`* Set the `parse_dates` keyword equal to `True` to convert the Dates into real dates of the form year/month/day* Use the `usecols` keyword to select which columns you want to load into the DataFrame. For example `usecols = ['Open', 'High']`Fill in the code below:
###Code
# We load the Google stock data into a DataFrame
google_stock = pd.read_csv('./GOOG.csv', index_col=['Date'], parse_dates=True, usecols=['Date', 'Adj Close'])
# We load the Apple stock data into a DataFrame
apple_stock = pd.read_csv('./AAPL.csv', index_col=['Date'], parse_dates=True, usecols=['Date', 'Adj Close'])
# We load the Amazon stock data into a DataFrame
amazon_stock = pd.read_csv('./AMZN.csv', index_col=['Date'], parse_dates=True, usecols=['Date', 'Adj Close'])
###Output
_____no_output_____
###Markdown
You can check that you have loaded the data correctly by displaying the head of the DataFrames.
###Code
# We display the google_stock DataFrame
google_stock.head()
###Output
_____no_output_____
###Markdown
You will now join the three DataFrames above to create a single new DataFrame that contains all the `Adj Close` for all the stocks. Let's start by creating an empty DataFrame that has as row indices calendar days between `2000-01-01` and `2016-12-31`. We will use the `pd.date_range()` function to create the calendar dates first and then we will create a DataFrame that uses those dates as row indices:
###Code
# We create calendar dates between '2000-01-01' and '2016-12-31'
dates = pd.date_range('2000-01-01', '2016-12-31')
# We create an empty DataFrame that uses the above dates as indices
all_stocks = pd.DataFrame(index = dates)
###Output
_____no_output_____
###Markdown
To DoYou will now join the individual DataFrames, `google_stock`, `apple_stock`, and `amazon_stock`, to the `all_stocks` DataFrame. However, before you do this, it is necessary that you change the name of the columns in each of the three dataframes. This is because the column labels in the `all_stocks` dataframe must be unique. Since all the columns in the individual dataframes have the same name, `Adj Close`, we must change them to the stock name before joining them. In the space below change the column label `Adj Close` of each individual dataframe to the name of the corresponding stock. You can do this by using the `pd.DataFrame.rename()` function.
###Code
# Change the Adj Close column label to Google
google_stock = google_stock.rename(columns = {'Adj Close': 'Google'})
# Change the Adj Close column label to Apple
apple_stock = apple_stock.rename(columns = {'Adj Close': 'Apple'})
# Change the Adj Close column label to Amazon
amazon_stock = amazon_stock.rename(columns = {'Adj Close':'Amazon'})
###Output
_____no_output_____
###Markdown
You can check that the column labels have been changed correctly by displaying the dataframes
###Code
# We display the google_stock DataFrame
google_stock.head()
# We display the apple_stock DataFrame
apple_stock.head()
# We display the amazon_stock DataFrame
amazon_stock.head()
###Output
_____no_output_____
###Markdown
Now that we have unique column labels, we can join the individual DataFrames to the `all_stocks` DataFrame. For this we will use the `dataframe.join()` function. The function `dataframe1.join(dataframe2)` joins `dataframe1` with `dataframe2`. We will join each dataframe one by one to the `all_stocks` dataframe. Fill in the code below to join the dataframes, the first join has been made for you:
###Code
# We join the Google stock to all_stocks
all_stocks = all_stocks.join(google_stock)
# We join the Apple stock to all_stocks
all_stocks = all_stocks.join(apple_stock)
# We join the Amazon stock to all_stocks
all_stocks = all_stocks.join(amazon_stock)
###Output
_____no_output_____
###Markdown
You can check that the dataframes have been joined correctly by displaying the `all_stocks` dataframe
###Code
# We display the all_stocks DataFrame
all_stocks.head()
###Output
_____no_output_____
###Markdown
To DoBefore we proceed to get some statistics on the stock data, let's first check that we don't have any *NaN* values. In the space below check if there are any *NaN* values in the `all_stocks` dataframe. If there are any, remove any rows that have *NaN* values:
###Code
# Check if there are any NaN values in the all_stocks dataframe
all_stocks.isnull().sum().sum()
# Remove any rows that contain NaN values
all_stocks = all_stocks.dropna(axis=0)
###Output
_____no_output_____
###Markdown
You can check that the *NaN* values have been eliminated by displaying the `all_stocks` dataframe
###Code
# Check if there are any NaN values in the all_stocks dataframe
all_stocks.isnull().sum().sum()
###Output
_____no_output_____
###Markdown
Display the `all_stocks` dataframe and verify that there are no *NaN* values
###Code
# We display the all_stocks DataFrame
all_stocks.head()
###Output
_____no_output_____
###Markdown
Now that you have eliminated any *NaN* values, we can calculate some basic statistics on the stock prices. Fill in the code below
###Code
# Print the average stock price for each stock
print(all_stocks.mean())
# Print the median stock price for each stock
print(all_stocks.median())
# Print the standard deviation of the stock price for each stock
print(all_stocks.std())
# Print the correlation between stocks
all_stocks.corr()
###Output
_____no_output_____
###Markdown
We will now look at how we can compute some rolling statistics, also known as moving statistics. We can calculate for example the rolling mean (moving average) of the Google stock price by using the Pandas `dataframe.rolling().mean()` method. The `dataframe.rolling(N).mean()` calculates the rolling mean over an `N`-day window. In other words, we can take a look at the average stock price every `N` days using the above method. Fill in the code below to calculate the average stock price every 150 days for Google stock
###Code
# We compute the rolling mean using a 150-Day window for Google stock
rollingMean = all_stocks['Google'].rolling(150).mean()
###Output
_____no_output_____
###Markdown
We can also visualize the rolling mean by plotting the data in our dataframe. In the following lessons you will learn how to use **Matplotlib** to visualize data. For now I will just import matplotlib and plot the Google stock data on top of the rolling mean. You can play around by changing the rolling mean window and see how the plot changes.
###Code
# This allows plots to be rendered in the notebook
%matplotlib inline
# We import matplotlib into Python
import matplotlib.pyplot as plt
# We plot the Google stock data
plt.plot(all_stocks['Google'])
# We plot the rolling mean ontop of our Google stock data
plt.plot(rollingMean)
plt.legend(['Google Stock Price', 'Rolling Mean'])
plt.show()
###Output
_____no_output_____ |
results_processed/publication/massbank/ssvm_lib=v2__exp_ver=4/exp_03__stereochemistry.ipynb | ###Markdown
Experiment 3: Study of LC-MS$^2$Struct's ability to improve the identification of stereoisomersIn this experiment we output a score (either Only-MS$^2$ or LC-MS$^2$Struct) for each stereoisomer in the candidate set. Candidates are identified (or indexed, or distinguished) by their full InChIKey. Typically, the Only-MS$^2$ scores will be the same for a group of stereoisomers, that is, for a group of candidates sharing the same first InChIKey block. By using LC-MS$^2$Struct, we hope to be able to rank the stereoisomers correctly. To study that, we train two LC-MS$^2$Struct models for each MS$^2$ scorer: one that uses candidate fingerprints that encode chirality (3D), and one that uses fingerprints without chirality encoding (2D). The motivation behind this experiment is that we want to see how much of the rank improvement comes from the fact that we provide the SSVM with information to distinguish between stereoisomers. When predicting the scores using LC-MS$^2$Struct (2D) we can observe improved ranking. Perhaps not for the stereoisomers themselves, but we still capture 2D structure that improves the ranking, e.g. by ranking the correct "block" of stereoisomers higher than another "block" due to a better fit with the observed ROs. Using the 3D features, we allow LC-MS$^2$Struct (3D) to predict a different score for each candidate (each candidate now "looks different" to the ML model). Comparing the performance of 2D and 3D should give us an indication of how well we actually use the 3D information. Load raw results for all three MS$^2$ scorers
###Code
agg_setting = {
"marg_agg_fun": "average",
"cand_agg_id": "inchikey"
}
###Output
_____no_output_____
###Markdown
MetFragMetFrag performs an in-silico fragmentation for each candidate structure and compares the predicted and observed (from the MS2 spectrum) fragments.
###Code
# SSVM (2D)
setting = {"ds": "*", "mol_feat": "FCFP__binary__all__2D", "mol_id": "cid", "ms2scorer": "metfrag__norm", "ssvm_flavor": "default", "lloss_mode": "mol_feat_fps"}
res__ssvm__metfrag__2D = load_topk__publication(
setting, agg_setting, basedir=os.path.join("massbank__with_stereo"), top_k_method="csi", load_max_model_number=True
)
# SSVM (3D)
setting = {"ds": "*", "mol_feat": "FCFP__binary__all__3D", "mol_id": "cid", "ms2scorer": "metfrag__norm", "ssvm_flavor": "default", "lloss_mode": "mol_feat_fps"}
res__ssvm__metfrag__3D = load_topk__publication(
setting, agg_setting, basedir=os.path.join("massbank__with_stereo"), top_k_method="csi", load_max_model_number=True
)
# Perform some sanity checks
assert res__ssvm__metfrag__2D["scoring_method"].nunique() == 2
assert res__ssvm__metfrag__3D["scoring_method"].nunique() == 2
_check_onlyms(res__ssvm__metfrag__2D, [res__ssvm__metfrag__3D])
###Output
Performed tests: [1500.]
###Markdown
Overview result table (LC-MS$^2$Struct) Without chirality encoding (2D)
###Code
tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__metfrag__2D, test="ttest", ks=[1, 5, 10, 20])
tab.pivot(columns=["k", "scoring_method"], index=["dataset", "n_samples"], values="top_k_acc__as_labels")
###Output
_____no_output_____
###Markdown
With chirality encoding (3D)
###Code
tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__metfrag__3D, test="ttest", ks=[1, 5, 10, 20])
tab.pivot(columns=["k", "scoring_method"], index=["dataset", "n_samples"], values="top_k_acc__as_labels")
###Output
_____no_output_____
###Markdown
SIRIUS
###Code
# SSVM (2D)
setting = {"ds": "*", "mol_feat": "FCFP__binary__all__2D", "mol_id": "cid", "ms2scorer": "sirius__norm", "ssvm_flavor": "default", "lloss_mode": "mol_feat_fps"}
res__ssvm__sirius__2D = load_topk__publication(
setting, agg_setting, basedir=os.path.join("massbank__with_stereo"), top_k_method="csi", load_max_model_number=True
)
# SSVM (3D)
setting = {"ds": "*", "mol_feat": "FCFP__binary__all__3D", "mol_id": "cid", "ms2scorer": "sirius__norm", "ssvm_flavor": "default", "lloss_mode": "mol_feat_fps"}
res__ssvm__sirius__3D = load_topk__publication(
setting, agg_setting, basedir=os.path.join("massbank__with_stereo"), top_k_method="csi", load_max_model_number=True
)
# Perform some sanity checks
assert res__ssvm__sirius__2D["scoring_method"].nunique() == 2
assert res__ssvm__sirius__3D["scoring_method"].nunique() == 2
_check_onlyms(res__ssvm__sirius__2D, [res__ssvm__sirius__3D])
###Output
Performed tests: [1500.]
###Markdown
Overview result table (LC-MS$^2$Struct) Without chirality encoding (2D)
###Code
tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__sirius__2D, test="ttest", ks=[1, 5, 10, 20])
tab.pivot(columns=["k", "scoring_method"], index=["dataset", "n_samples"], values="top_k_acc__as_labels")
###Output
_____no_output_____
###Markdown
With chirality encoding (3D)
###Code
tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__sirius__3D, test="ttest", ks=[1, 5, 10, 20])
tab.pivot(columns=["k", "scoring_method"], index=["dataset", "n_samples"], values="top_k_acc__as_labels")
###Output
_____no_output_____
###Markdown
CFM-ID
###Code
# SSVM (2D)
setting = {"ds": "*", "mol_feat": "FCFP__binary__all__2D", "mol_id": "cid", "ms2scorer": "cfmid4__norm", "ssvm_flavor": "default", "lloss_mode": "mol_feat_fps"}
res__ssvm__cfmid4__2D = load_topk__publication(
setting, agg_setting, basedir=os.path.join("massbank__with_stereo"), top_k_method="csi", load_max_model_number=True
)
# SSVM (3D)
setting = {"ds": "*", "mol_feat": "FCFP__binary__all__3D", "mol_id": "cid", "ms2scorer": "cfmid4__norm", "ssvm_flavor": "default", "lloss_mode": "mol_feat_fps"}
res__ssvm__cfmid4__3D = load_topk__publication(
setting, agg_setting, basedir=os.path.join("massbank__with_stereo"), top_k_method="csi", load_max_model_number=True
)
# Perform some sanity checks
assert res__ssvm__cfmid4__2D["scoring_method"].nunique() == 2
assert res__ssvm__cfmid4__3D["scoring_method"].nunique() == 2
_check_onlyms(res__ssvm__cfmid4__2D, [res__ssvm__cfmid4__3D])
###Output
Performed tests: [1500.]
###Markdown
Overview result table (LC-MS$^2$Struct) Without chirality encoding (2D)
###Code
tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__cfmid4__2D, test="ttest", ks=[1, 5, 10, 20])
tab.pivot(columns=["k", "scoring_method"], index=["dataset", "n_samples"], values="top_k_acc__as_labels")
###Output
_____no_output_____
###Markdown
With chirality encoding (3D)
###Code
tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__cfmid4__3D, test="ttest", ks=[1, 5, 10, 20])
tab.pivot(columns=["k", "scoring_method"], index=["dataset", "n_samples"], values="top_k_acc__as_labels")
###Output
_____no_output_____
###Markdown
Visualization of the ranking performanceTop-k curve for each MS2-scoring method: CFM-ID, MetFrag and SIRIUS.
###Code
__tmp__03__a = plot__03__a(
res__baseline=[
res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D["scoring_method"] == "Only MS") & (res__ssvm__cfmid4__2D["n_models"] == 8)].assign(scoring_method="Only-MS$^2$", ms2scorer="CFM-ID"),
res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D["scoring_method"] == "Only MS") & (res__ssvm__metfrag__2D["n_models"] == 8)].assign(scoring_method="Only-MS$^2$", ms2scorer="MetFrag"),
res__ssvm__sirius__2D[(res__ssvm__sirius__2D["scoring_method"] == "Only MS") & (res__ssvm__sirius__2D["n_models"] == 8)].assign(scoring_method="Only-MS$^2$", ms2scorer="SIRIUS")
],
res__ssvm__2D=[
res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D["scoring_method"] == "MS + RT") & (res__ssvm__cfmid4__2D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (2D)", ms2scorer="CFM-ID"),
res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D["scoring_method"] == "MS + RT") & (res__ssvm__metfrag__2D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (2D)", ms2scorer="MetFrag"),
res__ssvm__sirius__2D[(res__ssvm__sirius__2D["scoring_method"] == "MS + RT") & (res__ssvm__sirius__2D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (2D)", ms2scorer="SIRIUS")
],
res__ssvm__3D=[
res__ssvm__cfmid4__3D[(res__ssvm__cfmid4__3D["scoring_method"] == "MS + RT") & (res__ssvm__cfmid4__3D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (3D)", ms2scorer="CFM-ID"),
res__ssvm__metfrag__3D[(res__ssvm__metfrag__3D["scoring_method"] == "MS + RT") & (res__ssvm__metfrag__3D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (3D)", ms2scorer="MetFrag"),
res__ssvm__sirius__3D[(res__ssvm__sirius__3D["scoring_method"] == "MS + RT") & (res__ssvm__sirius__3D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (3D)", ms2scorer="SIRIUS")
],
max_k=20,
weighted_average=False,
raise_on_missing_results=True,
aspect="landscape",
verbose=True
)
for ext in ["pdf", "svg"]:
plt.savefig(os.path.join(".", os.extsep.join(["plot_03__a", ext])))
__tmp__03__b = plot__03__b(
res__baseline=[
res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D["scoring_method"] == "Only MS") & (res__ssvm__cfmid4__2D["n_models"] == 8)].assign(scoring_method="Only-MS$^2$", ms2scorer="CFM-ID"),
res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D["scoring_method"] == "Only MS") & (res__ssvm__metfrag__2D["n_models"] == 8)].assign(scoring_method="Only-MS$^2$", ms2scorer="MetFrag"),
res__ssvm__sirius__2D[(res__ssvm__sirius__2D["scoring_method"] == "Only MS") & (res__ssvm__sirius__2D["n_models"] == 8)].assign(scoring_method="Only-MS$^2$", ms2scorer="SIRIUS")
],
res__ssvm__2D=[
res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D["scoring_method"] == "MS + RT") & (res__ssvm__cfmid4__2D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (2D)", ms2scorer="CFM-ID"),
res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D["scoring_method"] == "MS + RT") & (res__ssvm__metfrag__2D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (2D)", ms2scorer="MetFrag"),
res__ssvm__sirius__2D[(res__ssvm__sirius__2D["scoring_method"] == "MS + RT") & (res__ssvm__sirius__2D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (2D)", ms2scorer="SIRIUS")
],
res__ssvm__3D=[
res__ssvm__cfmid4__3D[(res__ssvm__cfmid4__3D["scoring_method"] == "MS + RT") & (res__ssvm__cfmid4__3D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (3D)", ms2scorer="CFM-ID"),
res__ssvm__metfrag__3D[(res__ssvm__metfrag__3D["scoring_method"] == "MS + RT") & (res__ssvm__metfrag__3D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (3D)", ms2scorer="MetFrag"),
res__ssvm__sirius__3D[(res__ssvm__sirius__3D["scoring_method"] == "MS + RT") & (res__ssvm__sirius__3D["n_models"] == 8)].assign(scoring_method="LC-MS$^2$Struct (3D)", ms2scorer="SIRIUS")
],
ks=[1, 20],
weighted_average=False,
raise_on_missing_results=True,
ctype="improvement",
label_format=".0f"
)
for ext in ["pdf", "svg"]:
plt.savefig(os.path.join(".", os.extsep.join(["plot_03__b", ext])))
###Output
_____no_output_____ |
experiments/experiment_3/community_assignment.ipynb | ###Markdown
1. 1st vary the number of significant nodes and sample size (fix non-sig nodes = 15, fix delta=0.25, p = 0.5)2. 2nd vary the number of non-significant nodes and sample size (fix sig nodes = 5, fix delta=0.25, p = 0.5)
###Code
# NOTE: these imports are inferred from usage below; the `experiment` helper is
# assumed to be defined elsewhere in the project (it is not shown in this notebook)
from itertools import product

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from joblib import Parallel, delayed

# Experiment 1
spacing = 20
ms = np.linspace(0, 50, spacing + 1)[1:].astype(int)
num_sig_nodes = np.linspace(0, 100, spacing + 1)[1:].astype(int)
num_non_sig_nodes = 15
delta = 0.25
reps = 100
res = Parallel(n_jobs=-1, verbose=1)(
delayed(experiment)(
m=m,
num_sig_nodes=n,
num_non_sig_nodes=num_non_sig_nodes,
delta=delta,
reps=reps,
)
for m, n in product(ms, num_sig_nodes)
)
res_df = pd.DataFrame(res, columns=["m", "num_sig_nodes", "mase_ari", "omni_ari"])
res_df.to_csv("./results/20200130_vary_sig_nodes.csv", index=False)
fmt = lambda x: "{:.2f}".format(x)
with sns.plotting_context("talk", font_scale=1.25):
fig, ax = plt.subplots(
1,
3,
gridspec_kw={"width_ratios": [1, 1, 0.05]},
figsize=(16, 8),
constrained_layout=True,
)
sns.heatmap(
np.flipud(res_df.mase_ari.values.reshape(spacing, -1)),
ax=ax[0],
square=True,
center=0,
cmap="RdBu_r",
cbar_kws=dict(shrink=0.7),
xticklabels=num_sig_nodes,
yticklabels=ms[::-1] * 2,
cbar_ax=ax[-1],
vmin=0,
vmax=1,
)
ax[0].set_title("MASE Average ARI")
sns.heatmap(
np.flipud(res_df.omni_ari.values.reshape(spacing, -1)),
ax=ax[1],
square=True,
center=0,
cmap="RdBu_r",
cbar_kws=dict(shrink=0.7),
cbar=False,
xticklabels=num_sig_nodes,
yticklabels=[],
# cbar_ax=ax[-1],
vmin=0,
vmax=1,
)
ax[1].set_title("Omni Average ARI")
fig.text(-0.03, 0.5, "Sample Size", va="center", rotation="vertical")
fig.text(0.5, -0.03, "Number of Significant Nodes", va="center", ha="center")
fig.savefig("./figures/20200130_vary_sig_nodes.png", dpi=300, bbox_inches="tight")
# Experiment 2
spacing = 20
ms = np.linspace(0, 50, spacing + 1)[1:].astype(int)
num_sig_nodes = 15
num_non_sig_nodes = np.linspace(0, 100, spacing + 1)[1:].astype(int)
delta = 0.25
reps = 100
res = Parallel(n_jobs=-1, verbose=1)(
delayed(experiment)(
m=m, num_sig_nodes=num_sig_nodes, num_non_sig_nodes=n, delta=delta, reps=reps
)
for m, n in product(ms, num_non_sig_nodes)
)
res_df = pd.DataFrame(res, columns=["m", "num_sig_nodes", "mase_ari", "omni_ari"])
res_df.to_csv("./results/20200130_vary_non_sig_nodes.csv", index=False)
fmt = lambda x: "{:.2f}".format(x)
with sns.plotting_context("talk", font_scale=1.25):
fig, ax = plt.subplots(
1,
3,
gridspec_kw={"width_ratios": [1, 1, 0.05]},
figsize=(16, 8),
constrained_layout=True,
)
sns.heatmap(
np.flipud(res_df.mase_ari.values.reshape(spacing, -1)),
ax=ax[0],
square=True,
center=0,
cmap="RdBu_r",
cbar_kws=dict(shrink=0.7),
xticklabels=num_non_sig_nodes,
yticklabels=ms[::-1] * 2,
cbar_ax=ax[-1],
vmin=0,
vmax=1,
)
ax[0].set_title("MASE Average ARI")
sns.heatmap(
np.flipud(res_df.omni_ari.values.reshape(spacing, -1)),
ax=ax[1],
square=True,
center=0,
cmap="RdBu_r",
cbar_kws=dict(shrink=0.7),
cbar=False,
xticklabels=num_non_sig_nodes,
yticklabels=[],
# cbar_ax=ax[-1],
vmin=0,
vmax=1,
)
ax[1].set_title("Omni Average ARI")
fig.text(-0.03, 0.5, "Sample Size", va="center", rotation="vertical")
fig.text(0.5, -0.03, "Number of Non-Significant Nodes", va="center", ha="center")
fig.savefig(
"./figures/20200130_vary_non_sig_nodes.png", dpi=300, bbox_inches="tight"
)
###Output
_____no_output_____ |
1-) Image Processing with Cv2/9- Image Histograms with CV2.ipynb | ###Markdown
###Code
# imports needed by this cell (cv2, numpy, matplotlib)
import cv2
import numpy as np
from matplotlib import pyplot as plt

golden_gate = cv2.imread("goldengate.jfif")
golden_gate_vis = cv2.cvtColor(golden_gate,cv2.COLOR_BGR2RGB)
plt.figure(),plt.imshow(golden_gate_vis),plt.title("Original")
print(golden_gate.shape)
mask = np.zeros(golden_gate.shape[:2],np.uint8)
plt.figure(),plt.imshow(mask,cmap="gray"),plt.title("MASK")
mask[150:200,0:150] = 255
plt.figure(),plt.imshow(mask,cmap="gray"),plt.title("Mask Size")
masked_img_vis = cv2.bitwise_and(golden_gate,golden_gate,mask=mask)
plt.figure(),plt.imshow(masked_img_vis,cmap="gray")
masked_img = cv2.bitwise_and(golden_gate,golden_gate,mask = mask)
masked_img_hist = cv2.calcHist([golden_gate],channels = [0], mask=mask,histSize=[256],ranges=[0,256])
plt.figure(),plt.plot(masked_img_hist)
###Output
_____no_output_____
###Markdown
Histogram Equalization: Contrast Enhancement
###Code
img = cv2.imread("histogram.jpg",0)
plt.figure(),plt.imshow(img,cmap="gray")
img_hist = cv2.calcHist([img],channels=[0],mask=None,histSize=[256],ranges=[0,256])
plt.figure(), plt.plot(img_hist)
eq_hist = cv2.equalizeHist(img)
plt.figure(),plt.imshow(eq_hist,cmap="gray")
###Output
_____no_output_____
###Markdown
Light gray levels were pushed toward 255 and dark ones toward 0.
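That stretching is exactly what the cumulative-histogram mapping does. The sketch below is an assumption about the internals rather than OpenCV's actual source; it builds the standard equalization lookup table by hand, and its output should closely match `cv2.equalizeHist`.

```python
# Manual histogram equalization: map each gray level through the normalized CDF.
import cv2
import numpy as np

img = cv2.imread("histogram.jpg", 0)                  # same grayscale image as above
hist = np.bincount(img.ravel(), minlength=256)        # counts per gray level
cdf = hist.cumsum()
cdf_min = cdf[cdf > 0].min()                          # first non-empty bin's cumulative count
lut = np.clip(np.round((cdf - cdf_min) / (img.size - cdf_min) * 255), 0, 255).astype(np.uint8)
manual_eq = lut[img]                                  # should closely match cv2.equalizeHist(img)
```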
###Code
eq_img_hist = cv2.calcHist([eq_hist],channels=[0],mask=None,histSize=[256],ranges=[0,256])
plt.figure(),plt.plot(eq_img_hist)
plt.figure(),plt.plot(img_hist)
###Output
_____no_output_____ |
assignments/ndvi_subscene_assignment-PA_long_way.ipynb | ###Markdown
Table of Contents

1. Introduction
2. Get bands 3, 4, 5 fullsize (green, red, near-ir)
3. This cell reads in your affine transform, metadata and profile
4. This cell gets the right reflection function for your satellite
5. Read only the window pixels from the band 3, 4, 5 files
6. In the next cell plot a mapped ndvi image with a red dot in your ul corner and a white dot in your lr corner

Introduction

There are 4 cells that ask for changes below; the rest should run as long as you use the variable names I ask for in the questions.
###Code
import rasterio
import a301
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
from a301.landsat.landsat_metadata import landsat_metadata
import cartopy
from rasterio import windows
from pyproj import transform as proj_transform
from pyproj import Proj
from a301.landsat.toa_reflectance import calc_reflc_8
import pprint
from a301.utils.data_read import download
from pathlib import Path
from affine import Affine
from IPython.display import Image
from a301.landsat.toa_reflectance import calc_refl_457, calc_reflc_8
from mpl_toolkits.axes_grid1 import make_axes_locatable
###Output
_____no_output_____
###Markdown
Get bands 3, 4, 5 fullsize (green, red, near-ir)

At the end of this cell you should have the following path objects for your spring scene: meta_bigfile, band3_bigfile, band4_bigfile, band5_bigfile. They point to your Landsat TIF files and the MTL.txt metadata file.
###Code
filenames=["LC08_L1TP_190031_20170528_20170615_01_T1_B3.TIF",
"LC08_L1TP_190031_20170528_20170615_01_T1_B4.TIF",
"LC08_L1TP_190031_20170528_20170615_01_T1_B5.TIF",
"LC08_L1TP_190031_20170528_20170615_01_T1_MTL.txt"]
dest_folder=a301.data_dir / Path("landsat8/italy")
band3_bigfile=list(dest_folder.glob("*_B3.TIF"))[0]
band4_bigfile=list(dest_folder.glob("*_B4.TIF"))[0]
band5_bigfile=list(dest_folder.glob("*_B5.TIF"))[0]
meta_bigfile=list(dest_folder.glob("*MTL.txt"))[0]
###Output
_____no_output_____
###Markdown
This cell reads in your affine transform, metadata and profile

Using band4_bigfile (arbitrary).
###Code
metadata=landsat_metadata(meta_bigfile)
with rasterio.open(str(band4_bigfile)) as raster:
big_transform=raster.affine
big_profile=raster.profile
zone = metadata.UTM_ZONE
crs = cartopy.crs.UTM(zone, southern_hemisphere=False)
p_utm=Proj(crs.proj4_init)
p_lonlat=Proj(proj='latlong',datum='WGS84')
###Output
Scene LC81900312017148LGN00 center time is 2017-05-28 09:46:46
###Markdown
This cell gets the right reflection function for your satellite
###Code
refl_dict={'LANDSAT_7':calc_refl_457,'LANDSAT_8':calc_reflc_8}
satellite=metadata.SPACECRAFT_ID
refl_fun=refl_dict[satellite]
###Output
_____no_output_____
###Markdown
Define a subscene window and a transform

In the cell below, get the upper left col, row (ul_col, ul_row) and the upper left and lower right x, y (ul_x, ul_y, lr_x, lr_y) coordinates of your subscene, as in the image_zoom notebook. Use ul_col, ul_row, ul_x, ul_y plus your subscene width and height to make a rasterio window and a new transform:

    window = Window(ul_col, ul_row, small_width, small_height)
    new_affine = Affine(30., 0., ul_x, 0., -30., ul_y)
    extent = [ul_x, lr_x, lr_y, ul_y]
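A minimal sketch of that pattern is shown below, with made-up corner coordinates standing in for the values computed in the next cell; it only illustrates how the inverse of the full-scene affine, the Window, the 30 m subscene affine, and the imshow extent fit together.

```python
# Hypothetical example: the coordinates here are placeholders, not the real ones.
from rasterio.windows import Window
from affine import Affine

small_width, small_height = 3300, 2700
ul_x, ul_y = 500000.0, 4625000.0                 # made-up UTM coords of the subscene corner

ul_col, ul_row = [int(v) for v in ~big_transform * (ul_x, ul_y)]   # map coords to pixel indices
window = Window(ul_col, ul_row, small_width, small_height)
new_affine = Affine(30.0, 0.0, ul_x, 0.0, -30.0, ul_y)             # 30 m Landsat pixels
lr_x, lr_y = new_affine * (small_width, small_height)              # lower-right corner
extent = [ul_x, lr_x, lr_y, ul_y]                                  # [left, right, bottom, top] for imshow
```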
###Code
italy_lon = 13.66477
italy_lat = 41.75983
italy_x, italy_y =proj_transform(p_lonlat,p_utm,italy_lon, italy_lat)
full_ul_xy=np.array(big_transform*(0,0))
print(f"orig ul corner x,y (km)={full_ul_xy*1.e-3}")
ul_col, ul_row = ~big_transform*(italy_x,italy_y)
ul_col, ul_row = int(ul_col), int(ul_row)
l_col_offset= -1300
r_col_offset= +2000
b_row_offset= +2600
t_row_offset= -100
col_slice=slice(ul_col+l_col_offset,ul_col+r_col_offset)
row_slice=slice(ul_row + t_row_offset, ul_row + b_row_offset)
italy_ul_xy = big_transform*(col_slice.start,row_slice.start)
italy_lr_xy = big_transform*(col_slice.stop,row_slice.stop)
small_height, small_width = 2700,3300
ul_x, ul_y = italy_ul_xy[0], italy_ul_xy[1]
# window=Window(ul_col, ul_row, small_width, small_height)
new_affine=Affine(30.,0.,ul_x,0.,-30.,ul_y)
image_extent=[italy_ul_xy[0],italy_lr_xy[0],italy_ul_xy[1],italy_lr_xy[1]]
###Output
orig ul corner x,y (km)=[ 271.185 4742.715]
###Markdown
Read only the window pixels from the band 3, 4, 5 files
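One way to do this literally (an alternative sketch, not what the cell below does) is to build a rasterio window from the row and column slices and read only those pixels from each TIF; the raw counts would still need to be converted to top-of-atmosphere reflectance afterwards.

```python
# Alternative: read only the subscene pixels straight from the band files.
from rasterio.windows import Window

subscene_window = Window(col_slice.start, row_slice.start,
                         col_slice.stop - col_slice.start,
                         row_slice.stop - row_slice.start)
counts = {}
for bandnum, filepath in zip([3, 4, 5], [band3_bigfile, band4_bigfile, band5_bigfile]):
    with rasterio.open(str(filepath)) as src:
        counts[bandnum] = src.read(1, window=subscene_window)   # raw digital numbers only
```

The cell below instead computes full-scene reflectances and then keeps only the subscene rows and columns.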
###Code
from a301.landsat.toa_reflectance import toa_reflectance_8
# full-scene top-of-atmosphere reflectances, keyed by band number
refl_vals=toa_reflectance_8([3,4,5],meta_bigfile)
refl_dict=dict()
for bandnum in [3, 4, 5]:
    # keep only the subscene rows and columns for each band
    refl_dict[bandnum] = refl_vals[bandnum][row_slice, col_slice]
###Output
Scene LC81900312017148LGN00 center time is 2017-05-28 09:46:46
###Markdown
In the next cell calculate your ndvi

Save it in a variable called ndvi.
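For reference, the index combines the near-infrared (band 5) and red (band 4) reflectances,

$$\mathrm{NDVI} = \frac{\rho_{NIR} - \rho_{red}}{\rho_{NIR} + \rho_{red}}$$

so values near +1 indicate dense vegetation while values near zero or below indicate bare soil, rock, or water.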
###Code
# YOUR CODE HERE
ndvi = (refl_dict[5] - refl_dict[4])/(refl_dict[5] + refl_dict[4])   # subscene NDVI
plt.hist(ndvi[~np.isnan(ndvi)].flat);
plt.title('spring ndvi')
plt.savefig('spring_ndvi.png')
###Output
_____no_output_____
###Markdown
In the next cell plot a mapped ndvi image with a red dot in your ul corner and a white dot in your lr cornerAdjust this plot to fit your image. Just delete the bottom line and work with the provided commands
###Code
vmin=0.0
vmax=0.8
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
palette='viridis'
pal = plt.get_cmap(palette)
pal.set_bad('0.75') #75% grey for out-of-map cells
pal.set_over('w') #color cells > vmax white
pal.set_under('k') #color cells < vmin black
fig, ax = plt.subplots(1, 1,figsize=[10,15],
subplot_kw={'projection': crs})
col=ax.imshow(ndvi,origin="upper",
extent=image_extent,transform=crs)
ax.plot(italy_ul_xy[0],italy_ul_xy[1],'ro',markersize=50)  # red dot at the upper-left corner
ax.plot(italy_lr_xy[0],italy_lr_xy[1],'wo',markersize=50)  # white dot at the lower-right corner
ax.set(title="spring ndvi")
cbar_ax = fig.add_axes([0.95, 0.2, 0.05, 0.6])
cbar=ax.figure.colorbar(col,extend='both',cax=cbar_ax,orientation='vertical')
cbar.set_label('ndvi index')
###Output
_____no_output_____ |