diff --git a/.gitignore b/.gitignore
index 187e526..b6c2344 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,7 @@
.*classpath
build
*.pyc
+*.swp
+*.log
+.DS_Store
+.vscode
diff --git a/README b/README
deleted file mode 100644
index 3c06a5f..0000000
--- a/README
+++ /dev/null
@@ -1,7 +0,0 @@
-# This is the readme file for python.git in Jay's git repo hosted by github.com
-https://github.com/smilejay/python.git
-git://github.com/smilejay/python.git
-
-# Just for fun.
-# thanks,
-# Jay.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d47c7a9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,12 @@
+#### This is the README file of python.git in Jay's git repos hosted on github.com
+* https://github.com/smilejay/python.git
+* git://github.com/smilejay/python.git
+
+#### My homepage on github.com: https://github.com/smilejay
+```shell
+Just for fun.
+ thanks,
+ Jay.
+```
+
+#### Also, you can contact me via my blog: http://smilejay.com/
diff --git a/leetcode/easy/intersection.py b/leetcode/easy/intersection.py
new file mode 100644
index 0000000..fd74953
--- /dev/null
+++ b/leetcode/easy/intersection.py
@@ -0,0 +1,52 @@
+# -*- coding:UTF-8 -*-
+'''
+给定两个数组,编写一个函数来计算它们的交集。
+
+示例 1:
+输入:nums1 = [1,2,2,1], nums2 = [2,2]
+输出:[2]
+
+示例 2:
+输入:nums1 = [4,9,5], nums2 = [9,4,9,8,4]
+输出:[9,4]
+
+
+说明:
+ * 输出结果中的每个元素一定是唯一的。
+ * 我们可以不考虑输出结果的顺序。
+
+来源:力扣(LeetCode)
+链接:https://leetcode-cn.com/problems/intersection-of-two-arrays
+'''
+
+class Solution(object):
+ def intersection(self, nums1, nums2):
+ """
+ :type nums1: List[int]
+ :type nums2: List[int]
+ :rtype: List[int]
+ """
+ """
+ 元组set 是基于Hash来实现的,故:
+ 1. set 中的元素必须唯一(不可重复)
+ 2. set 中的元素必须是Hashable的
+ 3. 向集合中添加元素、删除元素、检查元素是否存在,都是非常快速的。平均时间复杂度为O(1),最坏的时间复杂度是O(n)
+
+ 本算法 时间复杂度 O(n+m) 空间复杂度 O(n+m)
+ """
+ ret = []
+ s1 = set(nums1)
+ s2 = set(nums2)
+ # 考虑到检查集合中元素O(1)的复杂度,所以只遍历较小的集合是时间复杂度更低的
+ if len(s1) > len(s2):
+ ret = [i for i in s2 if i in s1]
+ else:
+ ret = [i for i in s1 if i in s2]
+ return ret
+
+
+if __name__ == '__main__':
+ nums1 = [4,9,5]
+ nums2 = [9,4,9,8,4]
+ s = Solution()
+ print(s.intersection(nums1, nums2))
diff --git a/leetcode/easy/remove_duplicates.py b/leetcode/easy/remove_duplicates.py
new file mode 100644
index 0000000..47143bb
--- /dev/null
+++ b/leetcode/easy/remove_duplicates.py
@@ -0,0 +1,87 @@
+# -*- coding:UTF-8 -*-
+'''
+给出由小写字母组成的字符串 S,重复项删除操作会选择两个相邻且相同的字母,并删除它们。
+在 S 上反复执行重复项删除操作,直到无法继续删除。
+在完成所有重复项删除操作后返回最终的字符串。答案保证唯一。
+
+示例:
+
+输入:"abbaca"
+输出:"ca"
+解释:
+例如,在 "abbaca" 中,我们可以删除 "bb" 由于两字母相邻且相同,这是此时唯一可以执行删除操作的重复项。之后我们得到字符串 "aaca",其中又只有 "aa" 可以执行重复项删除操作,所以最后的字符串为 "ca"。
+
+提示:
+ * 1 <= S.length <= 20000
+ * S 仅由小写英文字母组成。
+
+来源:力扣(LeetCode)
+链接:https://leetcode-cn.com/problems/remove-all-adjacent-duplicates-in-string
+'''
+
+class Solution(object):
+ def removeDuplicates(self, S):
+ """
+ :type S: str
+ :rtype: str
+ 这是通过递归的方法来做,时间复杂度很高 O(n^2)
+ """
+ length = len(S)
+ for i in range(length-1):
+ if S[i] == S[i+1]:
+ S1 = S.replace('%s%s' % (S[i], S[i+1]), '')
+ # print(S1)
+ return self.removeDuplicates(S1)
+ return S
+
+
+ def removeDuplicates_1(self, S):
+ """
+ :type S: str
+ :rtype: str
+ 使用 栈,代码非常简洁,时间复杂度 O(n) ,空间复杂度O(n)
+ 消除一对相邻重复项可能会导致新的相邻重复项出现,如从字符串 abba中删除 bb 会导致出现新的相邻重复项 aa 出现。
+ 因此我们需要保存当前还未被删除的字符。一种显而易见的数据结构呼之欲出:栈。
+ 我们只需要遍历该字符串,如果当前字符和栈顶字符相同,我们就贪心地将其消去,否则就将其入栈即可。
+ """
+ stack = ['']
+ for i in S:
+ if i == stack[-1]:
+ stack.pop(-1)
+ else:
+ stack.append(i)
+ return ''.join(stack)
+
+
+ def removeDuplicates_2(self, S):
+ """
+ :type S: str
+ :rtype: str
+ 使用 双指针 , 时间复杂度 O(n),空间复杂度 O(n)
+ 挨着两个相同的同时消失,可以使用两个指针。
+ * 一个right一直往右移动,然后把指向的值递给left指向的值即可。
+ * 一个left每次都会比较挨着的两个是否相同,如果相同,他两同时消失
+ """
+ left = 0
+ right = 0
+ length = len(S)
+ l1 = list(S)
+ while (right < length):
+ l1[left] = l1[right]
+ if (left > 0) and l1[left - 1] == l1[left]:
+ left -= 2
+ left += 1
+ right += 1
+ return ''.join(l1[:left])
+
+
+if __name__ == '__main__':
+ s = Solution()
+ str1 = "aababaab"
+ print(s.removeDuplicates(str1))
+ print(s.removeDuplicates_1(str1))
+ print(s.removeDuplicates_2(str1))
+ str1 = "abbaca"
+ print(s.removeDuplicates(str1))
+ print(s.removeDuplicates_1(str1))
+ print(s.removeDuplicates_2(str1))
diff --git a/leetcode/easy/reverse_list.py b/leetcode/easy/reverse_list.py
new file mode 100644
index 0000000..2aed9b8
--- /dev/null
+++ b/leetcode/easy/reverse_list.py
@@ -0,0 +1,84 @@
+# *_* coding=utf-8 *_*
+
+'''
+反转链表:
+https://leetcode-cn.com/problems/reverse-linked-list/
+
+实现了一个链表;并且用 迭代/递归 两种方法进行反转。
+
+1. 迭代:时间复杂度 O(N) 空间复杂度 O(1)
+2. 递归:时间复杂度 O(N) 空间复杂度 O(N)
+
+'''
+
+
+# Definition for singly-linked list.
+class ListNode(object):
+ def __init__(self, val=0, next=None):
+ self.val = val
+ self.next = next
+
+
+class LinkedList(object):
+ # 通过一个list 初始化一个链表
+ def __init__(self, l=[1, 2, 3, 4, 5, 6]):
+ self.head = ListNode(l[0])
+ cur = self.head
+ for i in l[1:]:
+ cur.next = ListNode(i)
+ cur = cur.next
+
+ def print_linked_list(self):
+ # 为了保持链表 head 不被破坏
+ temp_head = self.head
+ while temp_head:
+ print(temp_head.val)
+ temp_head = temp_head.next
+
+
+class Solution(object):
+ # 遍历方式反转链表
+ def reverse_list(self, head):
+ """
+ :type head: ListNode
+ :rtype: ListNode
+ """
+ cur, pre = head, None
+ while cur:
+ # pre, pre.next, cur = cur, pre, cur.next
+ temp = cur.next
+ cur.next = pre
+ pre = cur
+ cur = temp
+ return pre
+
+ # 递归方式反转链表
+ def reverse_list_recursion(self, head):
+ """
+ :type head: ListNode
+ :rtype: ListNode
+ """
+ if (not head) or (not head.next):
+ return head
+ node = self.reverse_list_recursion(head.next)
+ head.next.next = head
+ head.next = None
+ return node
+
+
+if __name__ == '__main__':
+ ll = LinkedList(l=[1, 2, 3, 4, 5])
+ print('------------------------------')
+ print('before reverse')
+ ll.print_linked_list()
+ h = ll.head
+ s = Solution()
+ ll.head = s.reverse_list(h)
+ print('------------------------------')
+ print('after reverse: reverse_list()')
+ ll.print_linked_list()
+ print('------------------------------')
+ ll.head = s.reverse_list_recursion(ll.head)
+ print('after reverse: reverse_list_recursion()')
+ ll.print_linked_list()
+
diff --git a/leetcode/easy/two_num_sum.py b/leetcode/easy/two_num_sum.py
new file mode 100644
index 0000000..5978a4a
--- /dev/null
+++ b/leetcode/easy/two_num_sum.py
@@ -0,0 +1,51 @@
+# *_* coding=utf-8 *_*
+
+'''
+给定一个整数数组 nums 和一个整数目标值 target,请你在该数组中找出 和为目标值 的那 两个 整数,并返回它们的数组下标。
+你可以假设每种输入只会对应一个答案。但是,数组中同一个元素在答案里不能重复出现。
+你可以按任意顺序返回答案。
+
+链接:https://leetcode-cn.com/problems/two-sum
+
+1. 暴力破解:时间复杂度 O(N^2) 空间复杂度 O(1)
+2. 哈希表:时间复杂度 O(N) 空间复杂度 O(N)
+
+'''
+
+class Solution(object):
+ def twoSum(self, nums, target):
+ """
+ :type nums: List[int]
+ :type target: int
+ :rtype: List[int]
+ """
+ n = len(nums)
+ for i in range(n):
+ for j in range(i + 1, n):
+ if nums[i] + nums[j] == target:
+ return [i, j]
+ return ['not-found', 'not-found']
+
+ def twoSum_1(self, nums, target):
+ hashtable = dict()
+ for i, num in enumerate(nums):
+ if target - num in hashtable:
+ return [hashtable[target - num], i]
+ hashtable[nums[i]] = i
+ return ['not-found', 'not-found']
+
+
+if __name__ == '__main__':
+ num_list = [1, 7, 9, 4, 53, 42]
+ sum = 62
+ s = Solution()
+ print(s.twoSum(num_list, sum))
+ print(s.twoSum_1(num_list, sum))
+ num_list = [3, 2, 4]
+ sum = 6
+ print(s.twoSum(num_list, sum))
+ print(s.twoSum_1(num_list, sum))
+ num_list = [3, 3]
+ sum = 6
+ print(s.twoSum(num_list, sum))
+ print(s.twoSum_1(num_list, sum))
diff --git a/leetcode/easy/word_pattern.py b/leetcode/easy/word_pattern.py
new file mode 100644
index 0000000..9eee48e
--- /dev/null
+++ b/leetcode/easy/word_pattern.py
@@ -0,0 +1,89 @@
+# *_* coding=utf-8 *_*
+
+'''
+给定一种规律 pattern 和一个字符串 str ,判断 str 是否遵循相同的规律。
+这里的 遵循 指完全匹配,例如, pattern 里的每个字母和字符串 str 中的每个非空单词之间存在着双向连接的对应规律。
+
+说明: 你可以假设 pattern 只包含小写字母, str 包含了由单个空格分隔的小写字母。
+
+示例1:
+输入: pattern = "abba", str = "dog cat cat dog"
+输出: true
+
+示例 2:
+输入:pattern = "abba", str = "dog cat cat fish"
+输出: false
+
+链接:https://leetcode-cn.com/problems/word-pattern
+
+以第2种为例:时间复杂度O(n+m),空间复杂度O(n+m)
+'''
+
+class Solution(object):
+ def wordPattern(self, pattern, s):
+ """
+ :type pattern: str
+ :type s: str
+ :rtype: bool
+ """
+ s_list = s.strip().split()
+ my_map = dict()
+ if len(pattern) != len(s_list):
+ return False
+ for i, j in zip(pattern, s_list):
+ if i in my_map:
+ if my_map[i] != j:
+ return False
+ else:
+ if j in my_map.values():
+ return False
+ else:
+ my_map[i] = j
+ return True
+
+ def wordPattern_1(self, pattern, s):
+ """
+ :type pattern: str
+ :type s: str
+ :rtype: bool
+ """
+ s_list = s.strip().split()
+ ch_map = dict()
+ word_map = dict()
+ if len(pattern) != len(s_list):
+ return False
+ for i, j in zip(pattern, s_list):
+ if (i in ch_map and ch_map[i] != j) or (j in word_map and word_map[j] != i):
+ return False
+ else:
+ ch_map[i] = j
+ word_map[j] = i
+ return True
+
+ def wordPattern_2(self, pattern, s):
+ """
+ :type pattern: str
+ :type s: str
+ :rtype: bool
+ """
+ res=s.split()
+ return list(map(pattern.index, pattern))==list(map(res.index,res))
+
+
+if __name__ == '__main__':
+ s = Solution()
+ p1 = 'abba'
+ s1 = 'dog cat cat dog'
+ print(s.wordPattern(p1, s1))
+ print(s.wordPattern_1(p1, s1))
+ print(s.wordPattern_2(p1, s1))
+ p2 = 'abba'
+ s2 = 'dog cat cat fish'
+ print(s.wordPattern(p2, s2))
+ print(s.wordPattern_1(p2, s2))
+ print(s.wordPattern_2(p2, s2))
+ p3 = 'abba'
+ s3 = 'dog dog dog dog'
+ print(s.wordPattern(p3, s3))
+ print(s.wordPattern_1(p3, s3))
+ print(s.wordPattern_2(p3, s3))
diff --git a/leetcode/medium/buy_sell_stock_once_max_profit.py b/leetcode/medium/buy_sell_stock_once_max_profit.py
new file mode 100644
index 0000000..31ff737
--- /dev/null
+++ b/leetcode/medium/buy_sell_stock_once_max_profit.py
@@ -0,0 +1,34 @@
+# -*- coding:UTF-8 -*-
+'''
+给定一个数组 prices ,它的第 i 个元素 prices[i] 表示一支给定股票第 i 天的价格。
+你只能选择 某一天 买入这只股票,并选择在 未来的某一个不同的日子 卖出该股票。设计一个算法来计算你所能获取的最大利润。
+返回你可以从这笔交易中获取的最大利润。如果你不能获取任何利润,返回 0 。
+
+思路:
+如果我是在历史最低点买的股票就好了!太好了,在题目中,我们只要用一个变量记录一个历史最低价格 minprice,
+我们就可以假设自己的股票是在那天买的。那么我们在第 i 天卖出股票能得到的利润就是 prices[i] - minprice。
+因此,我们只需要遍历价格数组一遍,记录历史最低点,然后在每一天考虑这么一个问题:
+如果我是在历史最低点买进的,那么我今天卖出能赚多少钱?当考虑完所有天数之时,我们就得到了最好的答案。
+
+
+来源:力扣(LeetCode)
+链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock
+'''
+
+
+class Solution:
+ def maxProfit(self, prices):
+ min_price = float('inf')
+ max_profit = 0
+ for price in prices:
+ min_price = min(min_price, price)
+ max_profit = max(max_profit, price - min_price)
+ return max_profit
+
+
+if __name__ == '__main__':
+ l1 = [7,1,5,3,6,4]
+ l2 = [7,6,4,3,1]
+ s = Solution()
+ print(s.maxProfit(l1))
+ print(s.maxProfit(l2))
diff --git a/leetcode/medium/length_of_longest_substring.py b/leetcode/medium/length_of_longest_substring.py
new file mode 100644
index 0000000..17b7e73
--- /dev/null
+++ b/leetcode/medium/length_of_longest_substring.py
@@ -0,0 +1,62 @@
+# -*- coding:UTF-8 -*-
+'''
+给定一个字符串 s ,请你找出其中不含有重复字符的 最长子串 的长度。
+
+示例 1:
+输入: s = "abcabcbb"
+输出: 3
+解释: 因为无重复字符的最长子串是 "abc",所以其长度为 3。
+
+示例 2:
+输入: s = "bbbbb"
+输出: 1
+解释: 因为无重复字符的最长子串是 "b",所以其长度为 1。
+
+s 由英文字母、数字、符号和空格组成
+
+
+思路:
+我们使用两个指针表示字符串中的某个子串(或窗口)的左右边界,其中左指针代表着上文中「枚举子串的起始位置」,而右指针即为上文中的 rk ;
+在每一步的操作中,我们会将左指针向右移动一格,表示 我们开始枚举下一个字符作为起始位置,然后我们可以不断地向右移动右指针,但需要保证这两个指针对应的子串中没有重复的字符。在移动结束后,这个子串就对应着 以左指针开始的,不包含重复字符的最长子串。我们记录下这个子串的长度;
+在枚举结束后,我们找到的最长的子串的长度即为答案。
+
+判断 是否有重复的字符,常用的数据结构为哈希集合 python中用set()
+在左指针向右移动的时候,我们从哈希集合中移除一个字符,在右指针向右移动的时候,我们往哈希集合中添加一个字符。
+
+
+作者:LeetCode-Solution
+链接:https://leetcode-cn.com/problems/longest-substring-without-repeating-characters/solution/wu-zhong-fu-zi-fu-de-zui-chang-zi-chuan-by-leetc-2/
+来源:力扣(LeetCode)
+著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
+
+来源:力扣(LeetCode)
+链接:https://leetcode-cn.com/problems/longest-substring-without-repeating-characters/solution/wu-zhong-fu-zi-fu-de-zui-chang-zi-chuan-by-leetc-2/
+'''
+
+
+class Solution:
+ def lengthOfLongestSubstring(self, s):
+ # 哈希集合,记录每个字符是否出现过
+ occ = set()
+ n = len(s)
+ # 右指针,初始值为 -1,相当于我们在字符串的左边界的左侧,还没有开始移动
+ rk, ans = -1, 0
+ for i in range(n):
+ if i != 0:
+ # 左指针向右移动一格,移除一个字符
+ occ.remove(s[i - 1])
+ while rk + 1 < n and s[rk + 1] not in occ:
+ # 不断地移动右指针
+ occ.add(s[rk + 1])
+ rk += 1
+ # 第 i 到 rk 个字符是一个极长的无重复字符子串
+ ans = max(ans, rk - i + 1)
+ return ans
+
+
+if __name__ == '__main__':
+ s1 = 'abcabcbb'
+ s2 = 'bbbbbb'
+ s = Solution()
+ print(s.lengthOfLongestSubstring(s1))
+ print(s.lengthOfLongestSubstring(s2))
diff --git a/py-libvirt/destroy_domains.py b/py-libvirt/destroy_domains.py
new file mode 100644
index 0000000..0d22d65
--- /dev/null
+++ b/py-libvirt/destroy_domains.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+import time
+import libvirt
+
+
+def destroy_domains():
+ '''
+    destroy all domains via the libvirt python API.
+ '''
+ conn = libvirt.open(None)
+ if conn:
+ for i in conn.listDomainsID():
+ dom = conn.lookupByID(i)
+ dom.destroy()
+ time.sleep(1)
+ if conn.listDomainsID():
+ print 'ERROR! there are live domains.'
+ else:
+ print 'Failed to open connection to the hypervisor'
+
+
+if __name__ == '__main__':
+ destroy_domains()
diff --git a/py2011/VisitBlog.py b/py2011/VisitBlog.py
index faff91f..2df20e9 100644
--- a/py2011/VisitBlog.py
+++ b/py2011/VisitBlog.py
@@ -13,14 +13,16 @@
urls = ['kvm_theory_practice/',
'about/',
- 'i_will_laugh_at_the_world/'
- ]
+ 'i_will_laugh_at_the_world/'
+ ]
visitTimesPerPage = 10
+
def usage():
print('Usage:', sys.argv[0], 'host')
+
def main(argv):
host = argv[1]
if host == '':
@@ -34,22 +36,23 @@ def main(argv):
class VisitPageThread(threading.Thread):
+
def __init__(self, threadName, host, url):
- threading.Thread.__init__(self, name = threadName)
+ threading.Thread.__init__(self, name=threadName)
self.host = host
self.url = url
-
+
def run(self):
url = self.host + self.url
req = urllib.request.Request(url)
req.set_proxy('companyname.com:911', 'http')
- #you may set you proxy here.
+ # you may set you proxy here.
try:
doc = urllib.request.urlopen(req).read()
print(doc)
except Exception as e:
- print("urlopen Exception : %s" %e)
+ print("urlopen Exception : %s" % e)
-if __name__=='__main__':
+if __name__ == '__main__':
sys.argv.append('http://smilejay.com/')
main(sys.argv)
diff --git a/py2011/get_ip_and_location.py b/py2011/get_ip_and_location.py
index 79c060f..5a4dc50 100755
--- a/py2011/get_ip_and_location.py
+++ b/py2011/get_ip_and_location.py
@@ -10,29 +10,31 @@
import re
import urllib.request
+
def get_reponse_from_url(url):
- req = urllib.request.Request(url)
- encoding = 'gbk'
- try:
- doc = urllib.request.urlopen(req).read()
-# print(doc.decode(encoding))
- return doc.decode(encoding)
- except Exception as e:
- print("urlopen Exception : %s" %e)
+ req = urllib.request.Request(url)
+ encoding = 'gbk'
+ try:
+ doc = urllib.request.urlopen(req).read()
+ # print(doc.decode(encoding))
+ return doc.decode(encoding)
+ except Exception as e:
+ print("urlopen Exception : %s" % e)
+
def get_ip_and_location():
- url_ip_qq = "http://fw.qq.com/ipaddress"
- url_location_youdao = "http://www.youdao.com/smartresult-xml/search.s?type=ip&q="
- re_ip = "((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))"
- str_ip = get_reponse_from_url(url_ip_qq)
- ip = re.search(re_ip, str_ip).group(1)
- #print("ip="+ip.group(1))
- print("your ip is:"+ip)
- url_location_youdao += ip
- str_location = get_reponse_from_url(url_location_youdao)
- re_location = '(.*)'
- location = re.search(re_location,str_location).group(1)
- print("you are here:"+location)
+ url_ip_qq = "http://fw.qq.com/ipaddress"
+ url_location_youdao = "http://www.youdao.com/smartresult-xml/search.s?type=ip&q="
+ re_ip = "((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))"
+ str_ip = get_reponse_from_url(url_ip_qq)
+ ip = re.search(re_ip, str_ip).group(1)
+ # print("ip="+ip.group(1))
+ print("your ip is:"+ip)
+ url_location_youdao += ip
+ str_location = get_reponse_from_url(url_location_youdao)
+ re_location = '(.*)'
+ location = re.search(re_location, str_location).group(1)
+ print("you are here:"+location)
if __name__ == '__main__':
- get_ip_and_location()
\ No newline at end of file
+ get_ip_and_location()
diff --git a/py2011/get_rpms.py b/py2011/get_rpms.py
index f706b85..e087d9b 100644
--- a/py2011/get_rpms.py
+++ b/py2011/get_rpms.py
@@ -11,51 +11,56 @@
import re
import os
+
def get_rpm_list(host, url, rpm_list):
- conn = http.client.HTTPConnection(host)
- conn.request("GET", url)
- res = conn.getresponse()
- data = res.read()
- str = data.decode("utf-8")
- parser = my_html_parser(rpm_list)
- parser.feed(str)
+ conn = http.client.HTTPConnection(host)
+ conn.request("GET", url)
+ res = conn.getresponse()
+ data = res.read()
+ str = data.decode("utf-8")
+ parser = my_html_parser(rpm_list)
+ parser.feed(str)
+
class my_html_parser(HTMLParser):
- def __init__(self, rpm_list):
- HTMLParser.__init__(self)
- rpm_list = rpm_list
- def handle_starttag(self, tag, attrs):
- if tag == 'a':
- for name, value in attrs:
- if name == "href":
- if re.search('\.rpm$', value):
- rpm_list.append(value)
+ def __init__(self, rpm_list):
+ HTMLParser.__init__(self)
+ rpm_list = rpm_list
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'a':
+ for name, value in attrs:
+ if name == "href":
+ if re.search('\.rpm$', value):
+ rpm_list.append(value)
+
def download_rpms(rpm_list):
- path = base_dir + dir +'/'
- for rpm in rpm_list:
- rpm_url = url+rpm
- local_name = path + rpm
- if os.path.exists(local_name):
- os.remove(local_name)
- f = open(local_name, 'wb')
- conn = http.client.HTTPConnection(host)
- conn.request("GET", rpm_url)
- res = conn.getresponse()
- f.write(res.read())
+ path = base_dir + dir + '/'
+ for rpm in rpm_list:
+ rpm_url = url+rpm
+ local_name = path + rpm
+ if os.path.exists(local_name):
+ os.remove(local_name)
+ f = open(local_name, 'wb')
+ conn = http.client.HTTPConnection(host)
+ conn.request("GET", rpm_url)
+ res = conn.getresponse()
+ f.write(res.read())
+
def prepare_dir(base_dir, dir):
- path = base_dir + dir
- if not os.path.exists(path):
- os.makedirs(path)
+ path = base_dir + dir
+ if not os.path.exists(path):
+ os.makedirs(path)
if __name__ == '__main__':
- host = 'XXX.XXX.com'
- url = "/pub/ISO/redhat/redhat-rhel6/RHEL-6.2-GA/Server/optional/x86_64/os/Packages/"
- rpm_list = []
- base_dir = '/home/master/Downloads/'
- dir = 'temp_packages'
- get_rpm_list(host, url, rpm_list)
- prepare_dir(base_dir, dir)
- download_rpms(rpm_list)
+ host = 'XXX.XXX.com'
+ url = "/pub/ISO/redhat/redhat-rhel6/RHEL-6.2-GA/Server/optional/x86_64/os/Packages/"
+ rpm_list = []
+ base_dir = '/home/master/Downloads/'
+ dir = 'temp_packages'
+ get_rpm_list(host, url, rpm_list)
+ prepare_dir(base_dir, dir)
+ download_rpms(rpm_list)
diff --git a/py2011/socket_client.py b/py2011/socket_client.py
index c5171c2..8dafa7e 100755
--- a/py2011/socket_client.py
+++ b/py2011/socket_client.py
@@ -6,15 +6,16 @@
import socket
+
def socket_client():
- HOST = '127.0.0.1'# The remote host
- PORT = 5007 # The same port as used by the server
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((HOST, PORT))
- s.send(b'Hello, world')
- data = s.recv(1024)
- s.close()
- print('Received', repr(data))
+ HOST = '127.0.0.1' # The remote host
+ PORT = 5007 # The same port as used by the server
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((HOST, PORT))
+ s.send(b'Hello, world')
+ data = s.recv(1024)
+ s.close()
+ print('Received', repr(data))
if __name__ == '__main__':
- socket_client()
\ No newline at end of file
+ socket_client()
diff --git a/py2011/socket_server.py b/py2011/socket_server.py
index e0d0e14..1ffca3b 100755
--- a/py2011/socket_server.py
+++ b/py2011/socket_server.py
@@ -6,19 +6,21 @@
import socket
+
def socket_server():
- HOST = '127.0.0.1' # Symbolic name meaning all available interfaces
- PORT = 5007 # Arbitrary non-privileged port
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind((HOST, PORT))
- s.listen(1)
- conn, addr = s.accept()
- print('Connected by', addr)
- while True:
- data = conn.recv(1024)
- if not data: break
- conn.send(data)
- conn.close()
+ HOST = '127.0.0.1' # Symbolic name meaning all available interfaces
+ PORT = 5007 # Arbitrary non-privileged port
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind((HOST, PORT))
+ s.listen(1)
+ conn, addr = s.accept()
+ print('Connected by', addr)
+ while True:
+ data = conn.recv(1024)
+ if not data:
+ break
+ conn.send(data)
+ conn.close()
if __name__ == '__main__':
- socket_server()
\ No newline at end of file
+ socket_server()
diff --git a/py2013/date_test.py b/py2013/date_test.py
index 03d2585..c160adb 100644
--- a/py2013/date_test.py
+++ b/py2013/date_test.py
@@ -9,13 +9,18 @@
import datetime
import calendar
+
def get_a_month_val(year, month, day=1):
day = datetime.datetime(year, month, day)
print day
- last_day_of_previous_month = day.replace(day=1) - datetime.timedelta(days=1)
- last_day_of_this_month = day.replace(day=calendar.monthrange(year, month)[1])
- print last_day_of_previous_month
- print last_day_of_this_month
+ last_day_previous_month = day.replace(day=1) - datetime.timedelta(days=1)
+ first_day_this_month = day.replace(day=1)
+ last_day_this_month = day.replace(day=calendar.monthrange(year, month)[1])
+ last_day_this_month_1 = day.replace(day=calendar.mdays[day.month])
+ print last_day_previous_month
+ print first_day_this_month
+ print last_day_this_month
+ print last_day_this_month_1
if __name__ == '__main__':
get_a_month_val(2013, 10, 20)
diff --git a/py2013/iplocation.py b/py2013/iplocation.py
index 8968bdb..77bc936 100644
--- a/py2013/iplocation.py
+++ b/py2013/iplocation.py
@@ -7,7 +7,8 @@
@author: Jay http://smilejay.com/
'''
-import json, urllib2
+import json
+import urllib2
import re
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
@@ -18,7 +19,7 @@ class location_freegeoip():
build the mapping of the ip address and its location.
the geo info is from
'''
-
+
def __init__(self, ip):
'''
Constructor of location_freegeoip class
@@ -26,10 +27,10 @@ def __init__(self, ip):
self.ip = ip
self.api_format = 'json'
self.api_url = 'http://freegeoip.net/%s/%s' % (self.api_format, self.ip)
-
+
def get_geoinfo(self):
""" get the geo info from the remote API.
-
+
return a dict about the location.
"""
urlobj = urllib2.urlopen(self.api_url)
@@ -37,22 +38,23 @@ def get_geoinfo(self):
datadict = json.loads(data, encoding='utf-8')
# print datadict
return datadict
-
+
def get_country(self):
key = 'country_name'
datadict = self.get_geoinfo()
- return datadict[key]
-
+ return datadict[key]
+
def get_region(self):
key = 'region_name'
datadict = self.get_geoinfo()
return datadict[key]
-
+
def get_city(self):
key = 'city'
datadict = self.get_geoinfo()
return datadict[key]
+
class location_taobao():
'''
build the mapping of the ip address and its location
@@ -60,13 +62,14 @@ class location_taobao():
e.g. http://ip.taobao.com/service/getIpInfo.php?ip=112.111.184.63
The getIpInfo API from Taobao returns a JSON object.
'''
+
def __init__(self, ip):
self.ip = ip
self.api_url = 'http://ip.taobao.com/service/getIpInfo.php?ip=%s' % self.ip
-
+
def get_geoinfo(self):
""" get the geo info from the remote API.
-
+
return a dict about the location.
"""
urlobj = urllib2.urlopen(self.api_url)
@@ -74,17 +77,17 @@ def get_geoinfo(self):
datadict = json.loads(data, encoding='utf-8')
# print datadict
return datadict['data']
-
+
def get_country(self):
key = u'country'
datadict = self.get_geoinfo()
return datadict[key]
-
+
def get_region(self):
key = 'region'
datadict = self.get_geoinfo()
return datadict[key]
-
+
def get_city(self):
key = 'city'
datadict = self.get_geoinfo()
@@ -103,26 +106,27 @@ class location_qq():
Note: the content of the Tencent's API return page is encoded by 'gb2312'.
e.g. http://ip.qq.com/cgi-bin/searchip?searchip1=112.111.184.64
'''
+
def __init__(self, ip):
'''
Construction of location_ipdotcn class.
'''
self.ip = ip
self.api_url = 'http://ip.qq.com/cgi-bin/searchip?searchip1=%s' % ip
-
+
def get_geoinfo(self):
urlobj = urllib2.urlopen(self.api_url)
data = urlobj.read().decode('gb2312').encode('utf8')
pattern = re.compile(r'该IP所在地为:(.+)')
m = re.search(pattern, data)
- if m != None:
+ if m is not None:
return m.group(1).split(' ')
else:
return None
-
+
def get_region(self):
return self.get_geoinfo()[0]
-
+
def get_isp(self):
return self.get_geoinfo()[1]
@@ -133,18 +137,21 @@ class location_ipdotcn():
the geo info is from www.ip.cn
need to use PhantomJS to open the URL to render its JS
'''
+
def __init__(self, ip):
'''
Construction of location_ipdotcn class.
'''
self.ip = ip
self.api_url = 'http://www.ip.cn/%s' % ip
-
+
def get_geoinfo(self):
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/29.0 " )
- driver = webdriver.PhantomJS(executable_path='/usr/local/bin/phantomjs', desired_capabilities=dcap)
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/29.0 ")
+ driver = webdriver.PhantomJS(
+ executable_path='/usr/local/bin/phantomjs',
+ desired_capabilities=dcap)
driver.get(self.api_url)
text = driver.find_element_by_xpath('//div[@id="result"]/div/p').text
res = text.split('来自:')[1].split(' ')
@@ -157,7 +164,7 @@ def get_region(self):
def get_isp(self):
return self.get_geoinfo()[1]
-
+
if __name__ == '__main__':
ip = '110.84.0.129'
# iploc = location_taobao(ip)
@@ -170,4 +177,4 @@ def get_isp(self):
iploc = location_ipdotcn(ip)
# iploc.get_geoinfo()
print iploc.get_region()
- print iploc.get_isp()
\ No newline at end of file
+ print iploc.get_isp()
diff --git a/py2013/python3-mysql.py b/py2013/python3-mysql.py
index 241d8ef..dab24c3 100755
--- a/py2013/python3-mysql.py
+++ b/py2013/python3-mysql.py
@@ -3,19 +3,20 @@
# see details from http://dev.mysql.com/doc/connector-python/en/index.html
import mysql.connector
-import sys, os
+import sys
+import os
user = 'root'
-pwd = '123456'
+pwd = '123456'
host = '127.0.0.1'
-db = 'test'
+db = 'test'
data_file = 'mysql-test.dat'
create_table_sql = "CREATE TABLE IF NOT EXISTS mytable ( \
id int(10) AUTO_INCREMENT PRIMARY KEY, \
- name varchar(20), age int(4) ) \
- CHARACTER SET utf8"
+ name varchar(20), age int(4) ) \
+ CHARACTER SET utf8"
insert_sql = "INSERT INTO mytable(name, age) VALUES ('Jay', 22 ), ('杰', 26)"
select_sql = "SELECT id, name, age FROM mytable"
@@ -44,11 +45,15 @@
for line in lines:
myset = line.split()
- sql = "INSERT INTO mytable (name, age) VALUES ('{}', {})".format(myset[0], myset[1])
+ sql = "INSERT INTO mytable (name, age) VALUES ('{}', {})".format(
+ myset
+ [0],
+ myset
+ [1])
try:
cursor.execute(sql)
except mysql.connector.Error as err:
- print("insert table 'mytable' from file 'mysql-test.dat' -- failed.")
+ print("insert table 'mytable' from file 'mysql-test.dat' - failed.")
print("Error: {}".format(err.msg))
sys.exit()
diff --git a/py2013/rename_files.py b/py2013/rename_files.py
index 3061ab9..fceebcd 100644
--- a/py2013/rename_files.py
+++ b/py2013/rename_files.py
@@ -23,5 +23,5 @@ def rename_files():
# print(new_name)
os.rename(os.path.join(path,file),os.path.join(path,new_name))
-if __namei_ == '__main__':
+if __name__ == '__main__':
rename_files()
diff --git a/py2014/fibonacci.py b/py2014/fibonacci.py
index c47c965..98d40b2 100644
--- a/py2014/fibonacci.py
+++ b/py2014/fibonacci.py
@@ -2,6 +2,7 @@
def fib1(n):
+ ''' normal recursion '''
if n == 0:
return 0
elif n == 1:
@@ -14,6 +15,7 @@ def fib1(n):
def fib2(n):
+ ''' recursion with cached results '''
if n in known:
return known[n]
@@ -22,6 +24,30 @@ def fib2(n):
return res
+def fib3(n):
+ ''' non-recursion '''
+ last1 = 0
+ last2 = 1
+ if n == 0:
+ return 0
+ elif n == 1:
+ return 1
+ elif n >= 2:
+ for _ in range(2, n+1):
+ res = last1 + last2
+ last1 = last2
+ last2 = res
+ return last2
+
+
+def fib4(n):
+ ''' use a list to store all the results '''
+ l = [0, 1]
+ for i in range(2, n+1):
+ l.append(l[i-2] + l[i-1])
+ return l[n]
+
+
if __name__ == '__main__':
n = 40
print(datetime.datetime.now())
@@ -29,3 +55,7 @@ def fib2(n):
print(datetime.datetime.now())
print('fib2(%d)=%d' % (n, fib2(n)))
print(datetime.datetime.now())
+ print('fib3(%d)=%d' % (n, fib3(n)))
+ print(datetime.datetime.now())
+ print('fib4(%d)=%d' % (n, fib4(n)))
+ print(datetime.datetime.now())
diff --git a/py2014/paxel.py b/py2014/paxel.py
index 2376ba5..c60e261 100644
--- a/py2014/paxel.py
+++ b/py2014/paxel.py
@@ -15,7 +15,11 @@
import os
import time
import urllib
-from threading import Thread
+from threading import Thread, Lock
+import shutil
+from contextlib import closing
+
+printLocker = Lock()
# in case you want to use http_proxy
local_proxies = {'http': 'http://131.139.58.200:8080'}
@@ -26,7 +30,7 @@ class AxelPython(Thread, urllib.FancyURLopener):
run() is a vitural method of Thread.
'''
- def __init__(self, threadname, url, filename, ranges=0, proxies={}):
+ def __init__(self, threadname, url, filename, ranges, proxies={}):
Thread.__init__(self, name=threadname)
urllib.FancyURLopener.__init__(self, proxies)
self.name = threadname
@@ -52,7 +56,9 @@ def run(self):
return
self.oneTimeSize = 16384 # 16kByte/time
+ printLocker.acquire()
print 'task %s will download from %d to %d' % (self.name, self.startpoint, self.ranges[1])
+ printLocker.release()
self.addheader("Range", "bytes=%d-%d" % (self.startpoint, self.ranges[1]))
self.urlhandle = self.open(self.url)
@@ -71,14 +77,12 @@ def run(self):
def GetUrlFileSize(url, proxies={}):
- urlHandler = urllib.urlopen(url, proxies=proxies)
- headers = urlHandler.info().headers
- length = 0
- for header in headers:
- if header.find('Length') != -1:
- length = header.split(':')[-1].strip()
- length = int(length)
- return length
+ with closing(urllib.urlopen(url, proxies=proxies)) as urlHandler:
+ length = urlHandler.headers.getheader('Content-Length')
+ if length is None:
+ return 0
+ else:
+ return int(length)
def SpliteBlocks(totalsize, blocknumber):
@@ -122,19 +126,17 @@ def paxel(url, output, blocks=6, proxies=local_proxies):
sys.stdout.write(show)
sys.stdout.flush()
time.sleep(0.5)
-
- filehandle = open(output, 'wb+')
- for i in filename:
- f = open(i, 'rb')
- filehandle.write(f.read())
- f.close()
- try:
- os.remove(i)
- pass
- except:
- pass
-
- filehandle.close()
+ sys.stdout.write(u'\rFilesize:{0} Downloaded:{0} Completed:100% \n'.format(size))
+ sys.stdout.flush()
+
+ with open(output, 'wb+') as filehandle:
+ for i in filename:
+ with open(i, 'rb') as f:
+ shutil.copyfileobj(f, filehandle, 102400)
+ try:
+ os.remove(i)
+ except OSError:
+ pass
if __name__ == '__main__':
url = 'http://dldir1.qq.com/qqfile/QQforMac/QQ_V3.1.1.dmg'
diff --git a/py2014/urllib2_test.py b/py2014/urllib2_test.py
index b2084cd..ae86b3c 100644
--- a/py2014/urllib2_test.py
+++ b/py2014/urllib2_test.py
@@ -8,7 +8,7 @@
resp = urllib2.urlopen(req, timeout=5)
except urllib2.HTTPError, e:
if e.code == 404:
- print 'response code %d' % e.code
+ print 'response code 404'
else:
print 'not 404. response code %d' % e.code
except urllib2.URLError, e:
diff --git a/py2015/android_demo.py b/py2015/android_demo.py
new file mode 100644
index 0000000..b8c32ba
--- /dev/null
+++ b/py2015/android_demo.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+'''
+Created on 2015-05-05
+'''
+import os
+import unittest
+from selenium import webdriver
+import time
+
+# Appium环境配置
+PATH = lambda p: os.path.abspath(
+ os.path.join(os.path.dirname(__file__), p)
+)
+
+
+class DpAppTests(unittest.TestCase):
+
+ def setUp(self):
+ desired_caps = {}
+ desired_caps['platformName'] = 'Android'
+ desired_caps['platformVersion'] = '4.4'
+ desired_caps['deviceName'] = 'emulator-5554'
+ desired_caps['autoLaunch'] = 'true'
+ # desired_caps['automationName'] = "selendroid"
+ desired_caps['app'] = PATH(
+ 'apps/Nova_7.2.0_debug.apk'
+ )
+ desired_caps['appPackage'] = 'com.dianping.v1'
+ desired_caps[
+ 'appActivity'] = 'com.dianping.main.guide.SplashScreenActivity'
+
+ self.driver = webdriver.Remote(
+ 'http://localhost:4723/wd/hub', desired_caps)
+
+ def tearDown(self):
+ self.driver.quit()
+
+ def test_dpApp(self):
+ time.sleep(10)
+ el = self.driver.find_element_by_xpath(
+ "//android.widget.TextView[contains(@text,'美食')]")
+ el.click()
+
+
+if __name__ == '__main__':
+ suite = unittest.TestLoader().loadTestsFromTestCase(DpAppTests)
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/py2015/args_kargs.py b/py2015/args_kargs.py
new file mode 100644
index 0000000..ff6a9f6
--- /dev/null
+++ b/py2015/args_kargs.py
@@ -0,0 +1,16 @@
+# *-* encoding=utf-8 *-*
+'''
+just try to use *args and **kwargs.
+*args表示任何多个无名参数,它是一个tuple;**kwargs表示关键字参数,它是一个dict。并且同时使用*args和**kwargs时,必须*args参数列要在**kwargs前
+'''
+
+def foo(*args, **kwargs):
+ print 'args = ', args
+ print 'kwargs = ', kwargs
+ print '---------------------------------------'
+
+if __name__ == '__main__':
+ foo(1,2,3,4)
+ foo(a=1,b=2,c=3)
+ foo(1,2,3,4, a=1,b=2,c=3)
+ foo('a', 1, None, a=1, b='2', c=3)
diff --git a/py2015/beer.py b/py2015/beer.py
new file mode 100644
index 0000000..2e8b04c
--- /dev/null
+++ b/py2015/beer.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python3
+'''
+calculate how many bottles of beer you can drink.
+1. money: n RMB (e.g. n=10)
+2. price: 2 RMB / bottle
+3. 3 empty bottles --> 1 bottle of beer
+可以最多向商店借1个空瓶, 但需要兑换喝完后将1个空瓶还回。
+2024年update: a. 增加了向商店借空瓶的说明 b. 使用python3而不是python2
+'''
+
+
+def bottles_cnt_beer(money=10):
+ '''
+ 计算能够喝到多少瓶啤酒。
+ 供参考答案如下:
+ 10 -> 7
+ 100 -> 75
+ 1234 -> 925
+ 12345 -> 9258
+ '''
+ price = 2
+ m = 3 # m empty bottles --> 1 bottle of beer
+ count = int(money / price)
+ empty_cnt = int(money / price)
+ while empty_cnt >= m:
+ count += int(empty_cnt / m)
+ empty_cnt = int(empty_cnt / m) + int(empty_cnt % m)
+    # borrow 1 empty bottle from the shop; drink; return 1 empty bottle to the shop.
+ if empty_cnt == (m - 1):
+ count += 1
+ return count
+
+
+if __name__ == '__main__':
+ n = 10
+ print("money n={}, you can drink {} bottles of beer.".format(n, bottles_cnt_beer(n)))
+ n = 100
+ print("money n={}, you can drink {} bottles of beer.".format(n, bottles_cnt_beer(n)))
+ n = 1234
+ print("money n={}, you can drink {} bottles of beer.".format(n, bottles_cnt_beer(n)))
+ n = int(input('Enter a number: '))
+ print("money n={}, you can drink {} bottles of beer.".format(n, bottles_cnt_beer(n)))
diff --git a/py2015/download_repos.py b/py2015/download_repos.py
new file mode 100644
index 0000000..57a5f86
--- /dev/null
+++ b/py2015/download_repos.py
@@ -0,0 +1,72 @@
+'''
+package installation command:
+ pip install beautifulsoup4 requests
+(bs4 doesn't work with python 2.6, so this only works on python 2.7)
+'''
+
+import requests
+from bs4 import BeautifulSoup
+import os
+from urllib import unquote
+
+
+class repos(object):
+
+ """download linux repos from mirrors' site."""
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh) Gecko/20100101 Firefox/42.0'}
+ urls_dict = {}
+
+ def __init__(self, base_url, base_dir):
+ super(repos, self).__init__()
+ self.base_url = base_url
+ self.base_dir = base_dir
+
+ def download(self):
+ for i in self.urls_dict:
+ for j in self.urls_dict[i]['files']:
+ url = self.base_url + i + j
+ print url
+ request = requests.get(url, headers=self.headers)
+ if request.ok:
+ file_location = self.base_dir + i + j
+ print file_location
+ if not os.path.exists(self.base_dir + i):
+ os.makedirs(self.base_dir + i)
+ with open(file_location, "wb") as the_file:
+ the_file.write(request.content)
+
+ def get_urls_dict(self, path='/', parent=None):
+ if path not in self.urls_dict:
+ self.urls_dict[path] = {
+ 'parent': parent, 'sub_dirs': [], 'files': []}
+ url = self.base_url + path
+ request = requests.get(url, headers=self.headers)
+ if request.ok:
+ soup = BeautifulSoup(request.text, 'html.parser')
+ for url in soup.find_all('a'):
+ url_text = unquote(url.get('href'))
+ if url_text.endswith('/') and url_text != '/' and url_text != '../':
+ self.urls_dict[path]['sub_dirs'].append(url_text)
+ elif not url_text.endswith('/') and not url_text.startswith('?'):
+ self.urls_dict[path]['files'].append(url_text)
+ if self.urls_dict[path]['parent'] == None and len(self.urls_dict[path]['sub_dirs']) == 0:
+ pass
+ elif len(self.urls_dict[path]['sub_dirs']) != 0:
+ for i in self.urls_dict[path]['sub_dirs']:
+ return self.get_urls_dict(path=path + i, parent=path)
+ elif self.urls_dict[path]['parent'] != None and len(self.urls_dict[path]['sub_dirs']) == 0:
+ self.urls_dict[self.urls_dict[path]['parent']][
+ 'sub_dirs'].remove(path.split('/')[-2] + '/')
+ return self.get_urls_dict(path=self.urls_dict[path]['parent'],
+ parent=self.urls_dict[self.urls_dict[path]['parent']]['parent'])
+
+
+if __name__ == '__main__':
+ url = 'http://mirrors.163.com/centos/6.7/os/x86_64'
+ the_dir = '/tmp/centos6u7'
+ repo = repos(url, the_dir)
+ repo.get_urls_dict()
+ # print repo.urls_dict
+ repo.download()
diff --git a/py2015/fib_generator.py b/py2015/fib_generator.py
new file mode 100644
index 0000000..948a1d3
--- /dev/null
+++ b/py2015/fib_generator.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+# make a Fibonacci generator.
+
+
+def fib(max):
+ n, a, b = 0, 0, 1
+ while n < max:
+ yield b
+ a, b = b, a + b
+ n = n + 1
+
+
+if __name__ == '__main__':
+ fib10 = fib(10)
+ for i in fib10:
+ print i
diff --git a/py2015/filter_demo.py b/py2015/filter_demo.py
new file mode 100644
index 0000000..3c1e99a
--- /dev/null
+++ b/py2015/filter_demo.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# 删除1~100的整数中的素数.
+
+import math
+
+
+def isNotPrime(n):
+ flag = False
+ for i in range(2, int(math.sqrt(n)) + 1):
+ if n % i == 0:
+ flag = True
+ return flag
+
+for i in filter(isNotPrime, xrange(1, 101)):
+ print i
diff --git a/py2015/gevent_demo.py b/py2015/gevent_demo.py
new file mode 100644
index 0000000..c40b50a
--- /dev/null
+++ b/py2015/gevent_demo.py
@@ -0,0 +1,17 @@
+from gevent import monkey
+monkey.patch_all()
+import gevent
+import urllib2
+
+
+def f(url):
+ print('GET: %s' % url)
+ resp = urllib2.urlopen(url)
+ data = resp.read()
+ print('%d bytes received from %s.' % (len(data), url))
+
+gevent.joinall([
+ gevent.spawn(f, 'https://www.python.org/'),
+ gevent.spawn(f, 'http://smilejay.com/'),
+ gevent.spawn(f, 'http://www.baidu.com/'),
+])
diff --git a/py2015/map_reduce.py b/py2015/map_reduce.py
new file mode 100644
index 0000000..c076296
--- /dev/null
+++ b/py2015/map_reduce.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+# python built-in map()/reduce() exercises.
+
+print map(lambda x: x.title(), ['adam', 'LISA', 'barT', 'Jay'])
+
+
+def prod(list1):
+ return reduce(lambda x, y: x * y, list1)
+
+
+list1 = xrange(1, 6)
+print prod(list1)
diff --git a/py2015/mutiprocessing_pool.py b/py2015/mutiprocessing_pool.py
new file mode 100644
index 0000000..9c0a274
--- /dev/null
+++ b/py2015/mutiprocessing_pool.py
@@ -0,0 +1,23 @@
+from multiprocessing import Pool
+import os
+import time
+import random
+
+
+def long_time_task(name):
+ print 'Run task %s (%s)...' % (name, os.getpid())
+ start = time.time()
+ time.sleep(random.random() * 3)
+ end = time.time()
+ print 'Task %s runs %0.2f seconds.' % (name, (end - start))
+
+
+if __name__ == '__main__':
+ print 'Parent process %s.' % os.getpid()
+ p = Pool()
+ for i in range(5):
+ p.apply_async(long_time_task, args=(i,))
+ print 'Waiting for all subprocesses done...'
+ p.close()
+ p.join()
+ print 'All subprocesses done.'
diff --git a/py2015/mutiprocessing_queue.py b/py2015/mutiprocessing_queue.py
new file mode 100644
index 0000000..37357ba
--- /dev/null
+++ b/py2015/mutiprocessing_queue.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# _*_ coding:utf-8 _*_
+
+from multiprocessing import Process, Queue
+import time
+import random
+
+
+# 写数据进程执行的代码:
+def write(q):
+ for value in ['A', 'B', 'C']:
+ print 'Put %s to queue...' % value
+ q.put(value)
+ time.sleep(random.random())
+
+
+# 读数据进程执行的代码:
+def read(q):
+ while True:
+ value = q.get(True)
+ print 'Get %s from queue.' % value
+
+
+if __name__ == '__main__':
+ # 父进程创建Queue,并传给各个子进程:
+ queue = Queue()
+ p_write = Process(target=write, args=(queue,))
+ p_read = Process(target=read, args=(queue,))
+ # 启动子进程pw,写入:
+ p_write.start()
+ # 启动子进程pr,读取:
+ p_read.start()
+ # 等待pw结束:
+ p_write.join()
+ # pr进程里是死循环,无法等待其结束,只能强行终止:
+ p_read.terminate()
diff --git a/py2015/my.cfg b/py2015/my.cfg
new file mode 100644
index 0000000..d9c9bc7
--- /dev/null
+++ b/py2015/my.cfg
@@ -0,0 +1,15 @@
+[mysqld]
+datadir=/var/lib/mysql
+socket=/var/lib/mysql/mysql.sock
+user=mysql
+# Disabling symbolic-links is recommended to prevent assorted security risks
+symbolic-links=0
+
+[mysqld_safe]
+log-error=/var/log/mysqld.log
+pid-file=/var/run/mysqld/mysqld.pid
+
+[jay_test]
+log = /var/log/jay-test.log
+user = jay
+greeting = 'Hello, %(user)s!'
diff --git a/py2015/range_xrange.py b/py2015/range_xrange.py
new file mode 100644
index 0000000..cebb33a
--- /dev/null
+++ b/py2015/range_xrange.py
@@ -0,0 +1,73 @@
+# _*_ coding: utf-8 _*_
+'''
+Created on Jul 14, 2015
+
+@author: Jay
+'''
+
+
+def my_range(start, end=None, step=1):
+ result = []
+ if not isinstance(start, int):
+ return 'start argument must be an integer.'
+ if (not isinstance(end, int)) and (not end is None):
+ return 'end argument must be an integer.'
+ if not isinstance(step, int):
+ return 'step argument must be an integer.'
+ elif step == 0:
+ return 'step argument must not be zero.'
+ if isinstance(end, int):
+ while True:
+ if start < end:
+ result.append(start)
+ start += step
+ else:
+ break
+ else: # end is None
+ start, end = 0, start
+ while True:
+ if start < end:
+ result.append(start)
+ start += step
+ else:
+ break
+ return result
+
+
+# 跟range函数的实现基本一样,只是使用yield关键字表示生成器
+def my_xrange(start, end=None, step=1):
+ if not isinstance(start, int):
+ pass
+ if (not isinstance(end, int)) and (not end is None):
+ pass
+ if not isinstance(step, int):
+ pass
+ elif step == 0:
+ pass
+ if isinstance(end, int):
+ while True:
+ if start < end:
+ yield start
+ start += step
+ else:
+ break
+ else: # end is None
+ start, end = 0, start
+ while True:
+ if start < end:
+ yield start
+ start += step
+ else:
+ break
+
+
+if __name__ == '__main__':
+ print my_range(8)
+ print my_range(8, 1, 1)
+ print my_range(8, 1.5, 1)
+ print my_range(1, 9)
+ print my_range(1, 9, 0)
+ print [i for i in my_xrange(8)]
+ print [i for i in my_xrange(8, 1, 1)]
+ print [i for i in my_xrange(8, 1.5, 1)]
+ print [i for i in my_xrange(1, 9)]
diff --git a/py2015/special_vars.py b/py2015/special_vars.py
new file mode 100644
index 0000000..6e5113f
--- /dev/null
+++ b/py2015/special_vars.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+
+class Student(object):
+
+ def __init__(self, name, score):
+ self.__name = name
+ self.__score = score
+
+ def print_score(self):
+ print '%s: %s' % (self.__name, self.__score)
+
+
+if __name__ == '__main__':
+ jay = Student('Jay', 99)
+ jay.print_score()
+ #print jay.__name
+ print jay._Student__name
diff --git a/py2015/taskmanager.py b/py2015/taskmanager.py
new file mode 100644
index 0000000..39b03b1
--- /dev/null
+++ b/py2015/taskmanager.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# _*_ encoding: utf-8 _*_
+
+# taskmanager.py
+
+import random
+import Queue
+from multiprocessing.managers import BaseManager
+
+# 发送任务的队列:
+task_queue = Queue.Queue()
+# 接收结果的队列:
+result_queue = Queue.Queue()
+
+
+# 从BaseManager继承的QueueManager:
+class QueueManager(BaseManager):
+ pass
+
+# 把两个Queue都注册到网络上, callable参数关联了Queue对象:
+QueueManager.register('get_task_queue', callable=lambda: task_queue)
+QueueManager.register('get_result_queue', callable=lambda: result_queue)
+# 绑定端口5000, 设置验证码'abc':
+manager = QueueManager(address=('', 5000), authkey='abc')
+# 启动Queue:
+manager.start()
+# 获得通过网络访问的Queue对象:
+task = manager.get_task_queue()
+result = manager.get_result_queue()
+# 放几个任务进去:
+for i in range(10):
+ n = random.randint(0, 10000)
+ print('Put task %d...' % n)
+ task.put(n)
+# 从result队列读取结果:
+print('Try get results...')
+for i in range(10):
+ r = result.get(timeout=10)
+ print('Result: %s' % r)
+# 关闭:
+manager.shutdown()
diff --git a/py2015/taskworker.py b/py2015/taskworker.py
new file mode 100644
index 0000000..9e2be5b
--- /dev/null
+++ b/py2015/taskworker.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# _*_ encoding: utf-8 _*_
+# taskworker.py
+
+import time
+import Queue
+from multiprocessing.managers import BaseManager
+
+# 创建类似的QueueManager:
+class QueueManager(BaseManager):
+ pass
+
+# 由于这个QueueManager只从网络上获取Queue,所以注册时只提供名字:
+QueueManager.register('get_task_queue')
+QueueManager.register('get_result_queue')
+
+# 连接到服务器,也就是运行taskmanager.py的机器:
+server_addr = '127.0.0.1'
+print('Connect to server %s...' % server_addr)
+# 端口和验证码注意保持与taskmanager.py设置的完全一致:
+m = QueueManager(address=(server_addr, 5000), authkey='abc')
+# 从网络连接:
+m.connect()
+# 获取Queue的对象:
+task = m.get_task_queue()
+result = m.get_result_queue()
+# 从task队列取任务,并把结果写入result队列:
+for i in range(10):
+ try:
+ n = task.get(timeout=1)
+ print('run task %d * %d...' % (n, n))
+ r = '%d * %d = %d' % (n, n, n * n)
+ time.sleep(1)
+ result.put(r)
+ except Queue.Empty:
+ print('task queue is empty.')
+# 处理结束:
+print('worker exit.')
diff --git a/py2015/temp-socket.py b/py2015/temp-socket.py
new file mode 100644
index 0000000..ebaa483
--- /dev/null
+++ b/py2015/temp-socket.py
@@ -0,0 +1,27 @@
+# use this socket server to debug an issue.
+
+import socket
+
+address = ('127.0.0.1', 8100)
+s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+s.bind(address)
+s.listen(5)
+
+while(True):
+ ss, addr = s.accept()
+ print 'got connected from', addr
+
+ ss.send('byebye')
+ ra = ss.recv(512)
+ print ra
+
+ for i in ra.split('\r\n\r\n'):
+ print i
+
+ for i in ra.split('\r\n'):
+ print i.encode('hex')
+ print i
+
+ ss.close()
+
+s.close()
diff --git a/py2015/threading_local.py b/py2015/threading_local.py
new file mode 100644
index 0000000..048ee05
--- /dev/null
+++ b/py2015/threading_local.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# _*_ coding: utf-8 _*_
+
+import threading
+
+# 创建全局ThreadLocal对象:
+local_school = threading.local()
+
+
+def process_student():
+ print 'Hello, %s (in %s)' % (local_school.student,
+ threading.current_thread().name)
+
+
+def process_thread(name):
+ # 绑定ThreadLocal的student:
+ local_school.student = name
+ process_student()
+
+
+t1 = threading.Thread(target= process_thread, args=('Alice',),
+ name='Thread-A')
+t2 = threading.Thread(target= process_thread, args=('Bob',),
+ name='Thread-B')
+t1.start()
+t2.start()
+t1.join()
+t2.join()
diff --git a/py2015/try_beautifulsoup4.py b/py2015/try_beautifulsoup4.py
new file mode 100644
index 0000000..be12896
--- /dev/null
+++ b/py2015/try_beautifulsoup4.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+
+'''
+Beautiful Soup sits atop an HTML or XML parser, providing Pythonic idioms
+for iterating, searching, and modifying the parse tree.
+# pip install beautifulsoup4
+# pip install requests
+'''
+
+import requests
+from bs4 import BeautifulSoup
+
+url = 'http://www.baidu.com'
+headers = {'User-Agent': 'Mozilla/5.0 (Macintosh) Gecko/20100101 Firefox/38.0'}
+request = requests.get(url, headers=headers)
+if request.ok:
+ soup = BeautifulSoup(request.text, 'html.parser')
+ print soup.title.string
diff --git a/py2015/try_configparser.py b/py2015/try_configparser.py
new file mode 100644
index 0000000..e11ec53
--- /dev/null
+++ b/py2015/try_configparser.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+import ConfigParser
+
+config = ConfigParser.ConfigParser()
+config.read('my.cfg')
+
+print config.get('mysqld', 'socket')
+print config.get('mysqld_safe', 'pid-file')
+print config.get('jay_test', 'greeting', raw=0)
+print config.get('jay_test', 'greeting', raw=1)
+
+config.set('jay_test', 'log', '/var/log/jay-test-new.log')
+config.add_section('new_section')
+config.set('new_section', 'language', 'Python')
+with open('my-new.cfg', 'wb') as configfile:
+ config.write(configfile)
diff --git a/py2015/try_requests.py b/py2015/try_requests.py
new file mode 100644
index 0000000..36fc8c5
--- /dev/null
+++ b/py2015/try_requests.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+
+'''
+just try a lib for http request.
+# pip install requests
+
+To avoid an 'InsecurePlatformWarning':
+You only need to install the security package extras.
+$ pip install requests[security]
+or, install them directly:
+$ pip install pyopenssl ndg-httpsclient pyasn1
+'''
+
+import requests
+
+url = 'http://www.baidu.com'
+headers = {'User-Agent': 'Mozilla/5.0 (Macintosh) Gecko/20100101 Firefox/38.0'}
+request = requests.get(url, headers=headers)
+if request.ok:
+ print(request.text)
+
diff --git a/py2015/unified_order_fabfile.py b/py2015/unified_order_fabfile.py
new file mode 100644
index 0000000..97dc36b
--- /dev/null
+++ b/py2015/unified_order_fabfile.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# a fabfile to manage the performance test for unified order project.
+# usage: fab -f unified_order_fabfile.py start_jmeter -P -z 30
+# author: Jay
+
+from fabric.context_managers import cd
+from fabric.operations import run, put
+from fabric.api import task, env
+
+env.hosts = ['192.168.1.2', '192.168.1.3', '192.168.1.4']
+env.port = 22
+env.user = 'root'
+env.password = '123456'
+
+
+@task
+def hostname():
+ # show hostname # just for testing
+ with cd('/tmp'):
+ run('hostname')
+
+
+@task
+def copy_jmeter():
+ # copy jmeter to other machines
+ with cd('/tmp'):
+ run('rm -rf jakarta-jmeter-2.3.4')
+ put('jakarta-jmeter-2.3.4', '/tmp/')
+ run('cd jakarta-jmeter-2.3.4/bin; chmod a+x jmeter')
+ #run('ls /tmp/')
+
+
+@task
+def start_jmeter():
+ # run jmeter in all test clients
+ #with cd('/tmp/'):
+ with cd('/tmp/jakarta-jmeter-2.3.4/bin/'):
+ run('screen -d -m ./jmeter -n -t my-order.jmx -l log.jtl &>abc.log')
+ #run('./jmeter -n -t unified-order.jmx -l log.jtl &>abc.log')
+ #run('screen -d -m sleep 10', pty=False)
+ #run('service tomcat start', pty=False)
+
+
+@task
+def kill_jmeter():
+ # kill the jmeter processes for unified order project
+ with cd('/tmp/'):
+ pids = run("ps -ef | grep unified | grep -v 'grep' | awk '{print $2'}")
+ pid_list = pids.split('\r\n')
+ for i in pid_list:
+ run('kill -9 %s' % i)
+
+
+@task
+def get_status():
+ # get jmeter(java) running status
+ with cd('/tmp'):
+ run('ps -ef | grep unified | grep java | grep -v grep')
diff --git a/py2015/yanghui_triangle.py b/py2015/yanghui_triangle.py
new file mode 100644
index 0000000..03c269f
--- /dev/null
+++ b/py2015/yanghui_triangle.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+
+def task(num):
+ ret = []
+ if num < 1:
+ return None
+ elif num == 1:
+ ret = [1]
+ else:
+ ret_before = task(num - 1)
+ for i in (xrange(num)):
+ if i == 0:
+ ret.append(1)
+ elif len(ret_before) <= i:
+ ret.append(ret_before[i - 1])
+ else:
+ ret.append(ret_before[i - 1] + ret_before[i])
+ return ret
+
+
+for i in range(6):
+ if task(i):
+ print task(i)
diff --git a/py2015/zero_cnt_of_factorial.py b/py2015/zero_cnt_of_factorial.py
new file mode 100644
index 0000000..fa325fe
--- /dev/null
+++ b/py2015/zero_cnt_of_factorial.py
@@ -0,0 +1,16 @@
+'''
+calculate how many zero(0)s are at the end of n!.
+'''
+
+
+def zero_cnt_of_factorial(num=50):
+ count = 0
+ i = 5
+ while ((num / i) >= 1):
+ count += num/i
+ i *= 5
+ return count
+
+if __name__ == '__main__':
+ n = int(raw_input('Enter a number: '))
+ print "%d! has %d zeros in the end." % (n, zero_cnt_of_factorial(n))
diff --git a/py2016/SimpleHTTPServerWithUpload.py b/py2016/SimpleHTTPServerWithUpload.py
new file mode 100644
index 0000000..9a5f5ea
--- /dev/null
+++ b/py2016/SimpleHTTPServerWithUpload.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python
+
+"""Simple HTTP Server With Upload.
+
+https://github.com/tualatrix/tools/blob/master/SimpleHTTPServerWithUpload.py
+
+This module builds on BaseHTTPServer by implementing the standard GET
+and HEAD requests in a fairly straightforward manner.
+
+"""
+
+
+import os
+import posixpath
+import BaseHTTPServer
+import urllib
+import cgi
+import shutil
+import mimetypes
+import re
+
+__version__ = "0.1"
+__all__ = ["SimpleHTTPRequestHandler"]
+__author__ = "bones7456"
+__home_page__ = "http://li2z.cn/"
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
+class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+ """Simple HTTP request handler with GET/HEAD/POST commands.
+
+ This serves files from the current directory and any of its
+ subdirectories. The MIME type for files is determined by
+calling the .guess_type() method. And can receive file uploaded
+ by client.
+
+ The GET/HEAD/POST requests are identical except that the HEAD
+ request omits the actual contents of the file.
+
+ """
+
+ server_version = "SimpleHTTPWithUpload/" + __version__
+
+ def do_GET(self):
+ """Serve a GET request."""
+ f = self.send_head()
+ if f:
+ self.copyfile(f, self.wfile)
+ f.close()
+
+ def do_HEAD(self):
+ """Serve a HEAD request."""
+ f = self.send_head()
+ if f:
+ f.close()
+
+ def do_POST(self):
+ """Serve a POST request."""
+ r, info = self.deal_post_data()
+ print r, info, "by: ", self.client_address
+ f = StringIO()
+ f.write('')
+ f.write("\nUpload Result Page\n")
+ f.write("\nUpload Result Page
\n")
+ f.write("
\n")
+ if r:
+ f.write("Success:")
+ else:
+ f.write("Failed:")
+ f.write(info)
+ f.write("
back" % self.headers['referer'])
+ f.write("
Powered By: bones7456, check new version at ")
+ f.write("")
+ f.write("here.\n\n")
+ length = f.tell()
+ f.seek(0)
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.send_header("Content-Length", str(length))
+ self.end_headers()
+ if f:
+ self.copyfile(f, self.wfile)
+ f.close()
+
+ def deal_post_data(self):
+ boundary = self.headers.plisttext.split("=")[1]
+ remainbytes = int(self.headers['content-length'])
+ line = self.rfile.readline()
+ remainbytes -= len(line)
+ if boundary not in line:
+ return (False, "Content NOT begin with boundary")
+ line = self.rfile.readline()
+ remainbytes -= len(line)
+ fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line)
+ if not fn:
+ return (False, "Can't find out file name...")
+ path = self.translate_path(self.path)
+ fn = os.path.join(path, fn[0])
+ while os.path.exists(fn):
+ fn += "_"
+ line = self.rfile.readline()
+ remainbytes -= len(line)
+ line = self.rfile.readline()
+ remainbytes -= len(line)
+ try:
+ out = open(fn, 'wb')
+ except IOError:
+ return (False, "Can't create file to write, do you have permission to write?")
+
+ preline = self.rfile.readline()
+ remainbytes -= len(preline)
+ while remainbytes > 0:
+ line = self.rfile.readline()
+ remainbytes -= len(line)
+ if boundary in line:
+ preline = preline[0:-1]
+ if preline.endswith('\r'):
+ preline = preline[0:-1]
+ out.write(preline)
+ out.close()
+ return (True, "File '%s' upload success!" % fn)
+ else:
+ out.write(preline)
+ preline = line
+ return (False, "Unexpect Ends of data.")
+
+ def send_head(self):
+ """Common code for GET and HEAD commands.
+
+ This sends the response code and MIME headers.
+
+ Return value is either a file object (which has to be copied
+ to the outputfile by the caller unless the command was HEAD,
+ and must be closed by the caller under all circumstances), or
+ None, in which case the caller has nothing further to do.
+
+ """
+ path = self.translate_path(self.path)
+ f = None
+ if os.path.isdir(path):
+ if not self.path.endswith('/'):
+ # redirect browser - doing basically what apache does
+ self.send_response(301)
+ self.send_header("Location", self.path + "/")
+ self.end_headers()
+ return None
+ for index in "index.html", "index.htm":
+ index = os.path.join(path, index)
+ if os.path.exists(index):
+ path = index
+ break
+ else:
+ return self.list_directory(path)
+ ctype = self.guess_type(path)
+ try:
+ # Always read in binary mode. Opening files in text mode may cause
+ # newline translations, making the actual size of the content
+ # transmitted *less* than the content-length!
+ f = open(path, 'rb')
+ except IOError:
+ self.send_error(404, "File not found")
+ return None
+ self.send_response(200)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+
+ def list_directory(self, path):
+ """Helper to produce a directory listing (absent index.html).
+
+ Return value is either a file object, or None (indicating an
+ error). In either case, the headers are sent, making the
+ interface the same as for send_head().
+
+ """
+ try:
+ list = os.listdir(path)
+ except os.error:
+ self.send_error(404, "No permission to list directory")
+ return None
+ list.sort(key=lambda a: a.lower())
+ f = StringIO()
+ displaypath = cgi.escape(urllib.unquote(self.path))
+ f.write('')
+ f.write("\nDirectory listing for %s\n" % displaypath)
+ f.write("\nDirectory listing for %s
\n" % displaypath)
+ f.write("
\n")
+ f.write("\n")
+ f.write("
\n\n")
+ for name in list:
+ fullname = os.path.join(path, name)
+ displayname = linkname = name
+ # Append / for directories or @ for symbolic links
+ if os.path.isdir(fullname):
+ displayname = name + "/"
+ linkname = name + "/"
+ if os.path.islink(fullname):
+ displayname = name + "@"
+ # Note: a link to a directory displays with @ and links with /
+ f.write('- %s\n'
+ % (urllib.quote(linkname), cgi.escape(displayname)))
+ f.write("
\n
\n\n\n")
+ length = f.tell()
+ f.seek(0)
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.send_header("Content-Length", str(length))
+ self.end_headers()
+ return f
+
+ def translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ Components that mean special things to the local file system
+ (e.g. drive or directory names) are ignored. (XXX They should
+ probably be diagnosed.)
+
+ """
+ # abandon query parameters
+ path = path.split('?', 1)[0]
+ path = path.split('#', 1)[0]
+ path = posixpath.normpath(urllib.unquote(path))
+ words = path.split('/')
+ words = filter(None, words)
+ path = os.getcwd()
+ for word in words:
+ drive, word = os.path.splitdrive(word)
+ head, word = os.path.split(word)
+ if word in (os.curdir, os.pardir):
+ continue
+ path = os.path.join(path, word)
+ return path
+
+ def copyfile(self, source, outputfile):
+ """Copy all data between two file objects.
+
+ The SOURCE argument is a file object open for reading
+ (or anything with a read() method) and the DESTINATION
+ argument is a file object open for writing (or
+ anything with a write() method).
+
+ The only reason for overriding this would be to change
+ the block size or perhaps to replace newlines by CRLF
+ -- note however that this the default server uses this
+ to copy binary data as well.
+
+ """
+ shutil.copyfileobj(source, outputfile)
+
+ def guess_type(self, path):
+ """Guess the type of a file.
+
+ Argument is a PATH (a filename).
+
+ Return value is a string of the form type/subtype,
+ usable for a MIME Content-type header.
+
+ The default implementation looks the file's extension
+ up in the table self.extensions_map, using application/octet-stream
+ as a default; however it would be permissible (if
+ slow) to look inside the data to make a better guess.
+
+ """
+
+ base, ext = posixpath.splitext(path)
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ ext = ext.lower()
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ else:
+ return self.extensions_map['']
+
+ if not mimetypes.inited:
+ mimetypes.init() # try to read system mime.types
+ extensions_map = mimetypes.types_map.copy()
+ extensions_map.update({
+ '': 'application/octet-stream', # Default
+ '.py': 'text/plain',
+ '.c': 'text/plain',
+ '.h': 'text/plain',
+ })
+
+
+def test(HandlerClass=SimpleHTTPRequestHandler,
+ ServerClass=BaseHTTPServer.HTTPServer):
+ BaseHTTPServer.test(HandlerClass, ServerClass)
+
+if __name__ == '__main__':
+ test()
diff --git a/py2016/cleanup_pid.py b/py2016/cleanup_pid.py
new file mode 100644
index 0000000..3d32c09
--- /dev/null
+++ b/py2016/cleanup_pid.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+import os
+import subprocess
+
+base_dir = '/tmp/pid_dir'
+pid_files = ['ut.pid', 'ft.pid']
+max_seconds = 48 * 3600
+
+
+def check_pid(pid):
+ """ Check For the existence of a unix pid. """
+ try:
+ os.kill(pid, 0)
+ except OSError:
+ return False
+ else:
+ return True
+
+
+def get_elapsed_time(pid):
+ '''get the elapsed time of the process with this pid'''
+ cmd = 'ps -p %s -o pid,etime' % str(pid)
+ proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+ # get data from stdout
+ proc.wait()
+ results = proc.stdout.readlines()
+ # parse data (should only be one)
+ for result in results:
+ try:
+ result.strip()
+ if result.split()[0] == str(pid):
+ pidInfo = result.split()[1]
+ # stop after the first one we find
+ break
+ except IndexError:
+ pass # ignore it
+ else:
+ # didn't find one
+ print "Process PID %s doesn't seem to exist!" % pid
+ return 0
+ pidInfo = [result.split()[1] for result in results
+ if result.split()[0] == str(pid)][0]
+ pidInfo = pidInfo.partition("-")
+ if pidInfo[1] == '-':
+ # there is a day
+ days = int(pidInfo[0])
+ rest = pidInfo[2].split(":")
+ hours = int(rest[0])
+ minutes = int(rest[1])
+ seconds = int(rest[2])
+ else:
+ days = 0
+ rest = pidInfo[0].split(":")
+ if len(rest) == 3:
+ hours = int(rest[0])
+ minutes = int(rest[1])
+ seconds = int(rest[2])
+ elif len(rest) == 2:
+ hours = 0
+ minutes = int(rest[0])
+ seconds = int(rest[1])
+ else:
+ hours = 0
+ minutes = 0
+ seconds = int(rest[0])
+
+ elapsed_time = days*24*3600 + hours*3600 + minutes*60 + seconds
+ return elapsed_time
+
+
+def remove_pid(pidfiles):
+ '''remove pid files if the process is not running.'''
+ for i in pidfiles:
+ filepath = '%s/%s' % (base_dir, i)
+ if os.path.exists(filepath):
+ del_flag = 0
+ with open(filepath) as f:
+ pid = f.read()
+ if not check_pid(int(pid)):
+ print 'pid file: %s' % i
+ print 'process does not exist with pid %s' % pid
+ del_flag = 1
+ elif get_elapsed_time(pid) > max_seconds:
+ print 'elapsed_time is greater than max_seconds'
+ print 'tring to kill pid %s' % pid
+ os.kill(int(pid), 9)
+ del_flag = 1
+ if del_flag:
+ os.unlink(filepath)
+
+
+if __name__ == '__main__':
+ remove_pid(pid_files)
diff --git a/py2016/draw_a_tree_with_turtle.py b/py2016/draw_a_tree_with_turtle.py
new file mode 100644
index 0000000..4c43e0b
--- /dev/null
+++ b/py2016/draw_a_tree_with_turtle.py
@@ -0,0 +1,34 @@
+from turtle import Turtle
+import time
+
+
+def tree(tlist, l, a, f):
+ if l > 5:
+ lst = []
+ for t in tlist:
+ t.forward(l)
+ p = t.clone()
+ t.left(a)
+ p.right(a)
+ lst.append(t)
+ lst.append(p)
+ time.sleep(1)
+ tree(lst, l*f, a, f)
+
+
+def main():
+ t = Turtle()
+ t.color('green')
+ t.pensize(5)
+ #t.hideturtle()
+ #t.speed(1)
+ t.getscreen().tracer(30, 0)
+ t.left(90)
+ t.penup()
+ t.goto(0, -200)
+ t.pendown()
+ tree([t], 150, 60, 0.6)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/py2016/for_else.py b/py2016/for_else.py
new file mode 100644
index 0000000..c1117d5
--- /dev/null
+++ b/py2016/for_else.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# test how to use for-else / while-else clause.
+# else sub-clause will be executed when there's no break/return/exception in the
+# iteration.
+
+list1 = [1, 2, 3, 4, 5]
+list2 = [4, 5, 6, 7, 8]
+
+for i in list1:
+ if i > 5:
+ print 'item is larger than 5; the index in list1 is %d' % list1.index(i)
+ break
+else:
+ print 'No item in list1 is larger than 5.'
+
+for i in list2:
+ if i > 5:
+ print 'item is larger than 5; the index in list2 is %d' % list2.index(i)
+ break
+else:
+ print 'No item in list2 is larger than 5.'
+
+
+i = 0
+while i < 10:
+ if i > 5:
+ print '%d is larger than 5' % i
+ break
+ i += 1
+else:
+ print 'No one is larger than 5'
diff --git a/py2016/randomMAC.py b/py2016/randomMAC.py
new file mode 100644
index 0000000..4243c00
--- /dev/null
+++ b/py2016/randomMAC.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python
+# *-* coding:utf-8 *-*
+
+import random
+
+
+def randomMAC():
+ '''
+ generate random MAC address.
+ the first 24 bits are for OUI (Organizationally Unique Identifier).
+ OUI是由IEEE的注册管理机构给不同厂家分配的代码,区分了不同的厂家。
+ '''
+ mac = [0x00, 0x8c, 0xfa,
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)
+ ]
+ return ':'.join(map(lambda x: "%02x" % x, mac))
+
+
+if __name__ == '__main__':
+ print randomMAC()
diff --git a/py2016/rpmdb.py b/py2016/rpmdb.py
new file mode 100644
index 0000000..56ba747
--- /dev/null
+++ b/py2016/rpmdb.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jay
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+import os
+import syslog
+import subprocess
+import time
+
+DOCUMENTATION = '''
+---
+module: rpmdb
+short_description: Manages the rpm database.
+description:
+ - Check and rebuild the rpm database.
+version_added: "2.0"
+options:
+ action:
+ choices: [ "check", "rebuild" ]
+ description:
+ - The action name.
+ - check: only check if rpm db is OK or not.
+ - rebuild: rebuild rpm db if it is NOT OK.
+ required: false
+ default: check
+ timeout:
+ description:
+ - The TIMEOUT seconds when checking rpm db.
+ required: false
+ default: 10
+notes: []
+requirements: [ rpm, rm ]
+author: Jay
+'''
+
+EXAMPLES = '''
+- rpmdb: action=check
+- rpmdb: action=rebuild
+'''
+
+# ==============================================================
+
+
+RPMBIN = '/bin/rpm'
+
+
def log(msg):
    """Send msg to syslog at NOTICE level, tagged 'ansible-<this file>'."""
    tag = 'ansible-%s' % os.path.basename(__file__)
    syslog.openlog(tag)
    syslog.syslog(syslog.LOG_NOTICE, msg)
+
+
def execute_command(module, cmd):
    """Log cmd (pipe-joined) to syslog, then run it via the Ansible module."""
    joined = '|'.join(cmd)
    log('Command %s' % joined)
    return module.run_command(cmd)
+
+
def check_db(module, timeout=10):
    """Run 'rpm -qa' to probe rpm database health.

    Returns:
        0 - db looks OK
        1 - 'error:' found in the command output
        2 - command exited non-zero
        3 - command did not finish within `timeout` seconds (it is killed)
    """
    rc = 0
    logfile = '/tmp/rpm-qa.log'
    elapsed_time = 0
    # Bug fix: the original '&>' is a bashism -- under /bin/sh (dash) it
    # parses as "background the command, then truncate the file", so the
    # output was never captured.  '> f 2>&1' is POSIX-sh compatible.
    cmd = '%s -qa > %s 2>&1' % (RPMBIN, logfile)
    child = subprocess.Popen(cmd, shell=True)

    while elapsed_time <= timeout:
        child_ret = child.poll()
        if child_ret is None:  # child still running
            time.sleep(1)
            elapsed_time += 1
        elif child_ret == 0:
            # close the log file deterministically (the original leaked the
            # open file handle)
            with open(logfile, 'r') as f:
                output = f.read()
            if 'error:' in output:
                rc = 1
            break
        else:
            rc = 2
            break
    if elapsed_time > timeout:
        child.kill()
        time.sleep(1)
        rc = 3
    return rc
+
+
def rebuild_db(module):
    """Remove stale BDB environment files, then rebuild the rpm database.

    Returns True only when both the cleanup and the rebuild succeed.
    """
    # Bug fix: module.run_command() with a list does NOT go through a shell,
    # so the glob '/var/lib/rpm/__db.*' was passed literally to rm and the
    # removal always failed.  Expand the glob in Python instead.
    import glob
    rm_ok = True
    try:
        for path in glob.glob('/var/lib/rpm/__db.*'):
            os.remove(path)
    except OSError:
        rm_ok = False
    rc, out, err = execute_command(module, [RPMBIN, '--rebuilddb'])
    return (rc == 0) and rm_ok
+
+
# main entry point -- runs under Ansible's module machinery (AnsibleModule is
# injected by the module_common magic below, not imported here)
def main():
    """Parse module args, then check -- and optionally rebuild -- the rpm db."""

    # defining module
    module = AnsibleModule(
        argument_spec=dict(
            action = dict(required=False, default='check', choices=['check', 'rebuild']),
            timeout = dict(required=False, default=10, type='int')
        )
    )

    changed = False
    msg = ''
    action = module.params['action']
    timeout = module.params['timeout']
    check_cmd = 'rpm -qa'

    if action == 'check':
        # check_db() return codes: 0 OK, 1 'error:' in output, 2 bad exit, 3 timeout
        rc = check_db(module, timeout)
        if rc == 1:
            module.fail_json(msg='Error when running cmd: %s' % (check_cmd))
        elif rc == 2:
            module.fail_json(msg='return code error. cmd: %s' % (check_cmd))
        elif rc == 3:
            module.fail_json(msg='Timeout %d s. cmd: %s' % (timeout, check_cmd))
        elif rc == 0:
            msg = 'OK. cmd: %s' % check_cmd

    elif action == 'rebuild':
        # only rebuild when the check reports a problem
        rc = check_db(module, timeout)
        if rc != 0:
            if rebuild_db(module):
                changed = True
                msg = 'OK. rm -f /var/lib/rpm/__db.00*; rpm --rebuilddb'
            else:
                msg = 'Error. rm -f /var/lib/rpm/__db.00*; rpm --rebuilddb'
                module.fail_json(msg=msg)

    module.exit_json(
        changed = changed,
        action = action,
        msg = msg
    )

# this is magic, see lib/ansible/executor/module_common.py
#<>
main()
diff --git a/py2016/selenium_capture_firefox.py b/py2016/selenium_capture_firefox.py
new file mode 100644
index 0000000..7fc2097
--- /dev/null
+++ b/py2016/selenium_capture_firefox.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+from selenium import webdriver
+import time
+
+
def capture(url, img_file="test1.png"):
    """Open url in Firefox, auto-scroll to the bottom (to trigger lazily
    loaded content), then save a screenshot to img_file.
    """
    firefox = webdriver.Firefox()
    firefox.set_window_size(1200, 900)
    firefox.get(url)
    # JS: scroll down 100px every 50ms; when the bottom is reached, append
    # 'scroll-done' to the title so the Python side can poll for completion.
    firefox.execute_script("""
    (function () {
        var y = 0;
        var step = 100;
        window.scroll(0, 0);

        function f() {
            if (y < document.body.scrollHeight) {
                y += step;
                window.scroll(0, y);
                setTimeout(f, 50);
            } else {
                window.scroll(0, 0);
                document.title += "scroll-done";
            }
        }

        setTimeout(f, 1000);
    })();
    """)

    # wait up to 30 seconds for the scroll loop to finish
    for i in xrange(30):
        if "scroll-done" in firefox.title:
            break
        time.sleep(1)

    firefox.save_screenshot(img_file)
    # Bug fix: quit() ends the whole session and terminates the geckodriver
    # process; close() only closed the window and leaked the driver process.
    firefox.quit()


if __name__ == "__main__":
    capture("http://www.taobao.com")
diff --git a/py2016/selenium_capture_safari.py b/py2016/selenium_capture_safari.py
new file mode 100644
index 0000000..ef365d2
--- /dev/null
+++ b/py2016/selenium_capture_safari.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+from selenium import webdriver
+import time
+
+
def capture(url, img_file="test1.png"):
    """Open url in Safari, auto-scroll to the bottom (to trigger lazily
    loaded content), then save a screenshot to img_file.
    """
    safari = webdriver.Safari()
    safari.set_window_size(1200, 900)
    safari.get(url)
    # JS: scroll down 100px every 50ms; when the bottom is reached, append
    # 'scroll-done' to the title so the Python side can poll for completion.
    safari.execute_script("""
    (function () {
        var y = 0;
        var step = 100;
        window.scroll(0, 0);

        function f() {
            if (y < document.body.scrollHeight) {
                y += step;
                window.scroll(0, y);
                setTimeout(f, 50);
            } else {
                window.scroll(0, 0);
                document.title += "scroll-done";
            }
        }

        setTimeout(f, 1000);
    })();
    """)

    # wait up to 30 seconds for the scroll loop to finish
    for i in xrange(30):
        if "scroll-done" in safari.title:
            break
        time.sleep(1)

    safari.save_screenshot(img_file)
    # Bug fix: quit() ends the whole session and terminates the safaridriver
    # process; close() only closed the window and leaked the driver process.
    safari.quit()


if __name__ == "__main__":
    capture("http://smilejay.com/")
diff --git a/py2016/try_argparse.py b/py2016/try_argparse.py
new file mode 100644
index 0000000..0b59d25
--- /dev/null
+++ b/py2016/try_argparse.py
@@ -0,0 +1,18 @@
+'''
+just have a try with argparse.
+e.g. python try_argparse.py -v -c 10
+'''
import argparse

# Fix: py2-only `print x` statements replaced with the parenthesized form,
# which prints identically under Python 2 and is valid Python 3.
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="increase output verbosity",
                    action="store_true")
parser.add_argument("-c", "--copy", help="use * copies")
args = parser.parse_args()
if args.verbose:
    print("verbosity turned on")
else:
    print("No --verbose arg")
if args.copy:
    copies = int(args.copy)
    for i in range(0, copies):
        print('%d copy' % i)
diff --git a/py2016/try_collections.py b/py2016/try_collections.py
new file mode 100644
index 0000000..7c9a940
--- /dev/null
+++ b/py2016/try_collections.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+from collections import defaultdict
+
colors = ['red', 'green', 'white', 'red', 'blue', 'red']

# Count occurrences with an explicit membership check.
d = {}
for c in colors:
    if c not in d:
        d[c] = 0
    d[c] += 1
print(d)

# Same count using dict.get() with a default value.
d1 = {}
for c in colors:
    d1[c] = d1.get(c, 0) + 1
print(d1)

# Same count using collections.defaultdict(int).
d2 = defaultdict(int)
for c in colors:
    d2[c] += 1
print(d2)


from collections import OrderedDict
od = OrderedDict()
od['z'] = 1
od['y'] = 2
od['x'] = 3
print(od.keys())  # keys come back in insertion order


from collections import deque

# deque: double-ended queue, O(1) append/pop at both ends
q = deque(['a', 'b', 'c'])
q.append('x')
q.appendleft('y')
print(q)


# Counter: frequency count of characters
from collections import Counter
c = Counter()
for ch in 'helloworld':
    c[ch] = c[ch] + 1
print(c)
\ No newline at end of file
diff --git a/py2016/try_docopt.py b/py2016/try_docopt.py
new file mode 100644
index 0000000..dcfa130
--- /dev/null
+++ b/py2016/try_docopt.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""Just try docopt lib for python
+
+Usage:
+ try_docopt.py (-h | --help)
+ try_docopt.py [options]
+
+Examples:
+ try_docopt.py -s +ts5,-ts2 -c +tc5,-tc3
+
+Options:
+ -h, --help
+ -s, --testsuite suites #add/remove some testsuites
+ -c, --testcase cases #add/remove some testcases
+
+"""
+
+from docopt import docopt
+
+testsuites = ['ts1', 'ts2', 'ts3', 'ts4']
+testcases = ['tc1', 'tc2', 'tc3', 'tc4']
+
+
def add_remove(tlist, opt_list):
    '''
    Add/remove items in tlist according to opt_list.

    opt_list entries look like '+ts5' (append 'ts5') or '-ts2' (remove
    'ts2', which must be present).

    Returns the mutated tlist, or 1 when any entry was invalid.
    (Fix: py2-only print statements replaced with the parenthesized form,
    identical output under py2 and valid py3.)
    '''
    flag = 0
    for opt in opt_list:
        opt = opt.strip()
        if opt.startswith('+'):
            tlist.append(opt[1:])
        elif opt.startswith('-'):
            if opt[1:] in tlist:
                tlist.remove(opt[1:])
            else:
                print('bad argument: %s is not in %s' % (opt[1:], tlist))
                flag = 1
        else:
            print('bad argument: %s' % opt)
            flag = 1
    if flag:
        return flag
    else:
        return tlist
+
if __name__ == '__main__':
    # docopt parses the module docstring as the CLI spec
    args = docopt(__doc__)
    ts_arg = args.get('--testsuite')
    tc_arg = args.get('--testcase')
    if ts_arg:
        # e.g. '+ts5,-ts2' -> ['+ts5', '-ts2']
        ts_opt_list = ts_arg.strip().split(',')
        testsuites = add_remove(testsuites, ts_opt_list)
    if tc_arg:
        tc_opt_list = tc_arg.strip().split(',')
        testcases = add_remove(testcases, tc_opt_list)
    # add_remove() returns 1 on bad input; print only when both succeeded
    if testsuites != 1 and testcases != 1:
        print 'ts: %s' % testsuites
        print 'tc: %s' % testcases
diff --git a/py2016/try_except_else_finally.py b/py2016/try_except_else_finally.py
new file mode 100644
index 0000000..7c1b798
--- /dev/null
+++ b/py2016/try_except_else_finally.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+import math
+
# Demonstrate try/except/else/finally ordering.
# Fixes: `except E, e` -> `except E as e` (valid on py2.6+ and py3, same
# behavior); the bare `except:` printed `e`, which is unbound when an
# unexpected exception type arrives (latent NameError) -- bind it with
# `except Exception as e`; prints parenthesized (identical output on py2).
print('====================================')
try:
    math.sqrt(-1)
except ValueError as e:
    print('ValueError: %s' % e)
except AttributeError as e:
    print('AttributeError: %s' % e)
except Exception as e:
    print('Other Exception: %s' % e)
else:
    print('No error found')
finally:
    print('Finally. #1\n')


print('====================================')
try:
    None.get('a')
except ValueError as e:
    print('ValueError: %s' % e)
except AttributeError as e:
    print('AttributeError: %s' % e)
except Exception as e:
    print('Other Exception: %s' % e)
else:
    print('No error found')
finally:
    print('Finally. #2\n')


print('====================================')
try:
    1 + 1
except ValueError as e:
    print('ValueError: %s' % e)
except AttributeError as e:
    print('AttributeError: %s' % e)
except Exception as e:
    print('Other Exception: %s' % e)
else:
    print('No error found')
finally:
    print('Finally. #3\n')
diff --git a/py2016/try_turtle.py b/py2016/try_turtle.py
new file mode 100644
index 0000000..3f3a095
--- /dev/null
+++ b/py2016/try_turtle.py
@@ -0,0 +1,24 @@
+# *-* coding=utf-8 *-*
+import turtle
+import time
# Pen color used while drawing
turtle.color("purple")
# Line width of the pen
turtle.pensize(5)
# Drawing speed
turtle.speed(10)
# Start drawing from the origin (0, 0)
turtle.goto(0,0)
# Draw the four sides of a square
for i in range(4):
    turtle.forward(100)
    turtle.right(90)
# Lift the pen and move (without drawing) to (-150, -120)
turtle.up()
turtle.goto(-150,-120)
# Switch the pen color
turtle.color("red")
# Write "Done" at (-150, -120)
turtle.write("Done")
turtle.done()
#time.sleep(10)
diff --git a/py2017/mutiprocessing_pool_return_value.py b/py2017/mutiprocessing_pool_return_value.py
new file mode 100644
index 0000000..5584f80
--- /dev/null
+++ b/py2017/mutiprocessing_pool_return_value.py
@@ -0,0 +1,26 @@
+from multiprocessing import Pool
+import os
+import time
+import random
+
+
def exponent(name, x, y):
    """Compute x**y, sleeping a random 0-3s to simulate work.

    Prints start/duration messages (py3-compatible parenthesized prints --
    identical output under py2, which was the only fix needed here).
    """
    print('Run task %s (%s)...' % (name, os.getpid()))
    start = time.time()
    time.sleep(random.random() * 3)
    end = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (end - start)))
    return x**y
+
+
if __name__ == '__main__':
    # Fix: py2-only print statements -> parenthesized form (same output).
    print('Parent process %s.' % os.getpid())
    p = Pool(4)
    # fan out 10 tasks; apply_async returns AsyncResult handles
    results = [p.apply_async(exponent, args=(i, x, y))
               for (i, x, y) in zip(range(10), range(10), range(10))]
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
    # collect return values after the pool has drained
    output = [r.get() for r in results]
    print(output)
diff --git a/py2018/mean.py b/py2018/mean.py
new file mode 100644
index 0000000..9c64e7b
--- /dev/null
+++ b/py2018/mean.py
@@ -0,0 +1,11 @@
+import numpy
+
def mean(numbers):
    """Return the arithmetic mean of numbers (0.0 for an empty list)."""
    denominator = max(len(numbers), 1)  # guards against ZeroDivisionError
    return float(sum(numbers)) / denominator
+
+
if __name__ == '__main__':
    # Fix: py2-only print statements -> parenthesized form (same output).
    list1 = [134, 134, 34.324, 34354, 11.2]
    print(mean(list1))
    # cross-check against numpy's implementation
    print(numpy.mean(list1))
diff --git a/py2018/proc_start_time.py b/py2018/proc_start_time.py
new file mode 100644
index 0000000..2833c06
--- /dev/null
+++ b/py2018/proc_start_time.py
@@ -0,0 +1,23 @@
+'''
+get the start time of a process
+e.g. python proc_start_time.py -p 12345
+'''
+import argparse
+import psutil
+import os
+import time
+
+
def get_start_time(pid):
    """Return the creation time of process `pid` as 'YYYY-mm-dd HH:MM:SS'."""
    proc = psutil.Process(int(pid))
    created = time.localtime(proc.create_time())
    return time.strftime("%Y-%m-%d %H:%M:%S", created)
+
+
if __name__ == '__main__':
    # Fix: py2-only print statement -> parenthesized form (same output).
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--pid", help="PID of a process")
    args = parser.parse_args()
    if not args.pid:
        # default to this script's own PID when none is given
        args.pid = os.getpid()
    print(get_start_time(args.pid))
diff --git a/py2018/set_check_localtime.py b/py2018/set_check_localtime.py
new file mode 100644
index 0000000..1e0c776
--- /dev/null
+++ b/py2018/set_check_localtime.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8
+
+import sys
+import time
+import subprocess
+import argparse
+import urllib2
+
+
def set_beijing_time_from_web(url):
    ''' set os and hardware clock as beijing time from internet '''
    # use urllib2 in python2; not use requests which need installation
    response = urllib2.urlopen(url)
    #print response.read()
    # take the Date header of the HTTP response (always GMT per RFC 7231)
    ts = response.headers['date']
    # parse the date-time portion into a struct_time
    gmt_time = time.strptime(ts[5:25], "%d %b %Y %H:%M:%S")
    # shift GMT by +8h to get Beijing time
    # NOTE(review): time.mktime() interprets gmt_time as *local* time, so
    # the +8h shift is only exact when the host timezone is UTC -- confirm.
    local_time = time.localtime(time.mktime(gmt_time) + 8*3600)
    str1 = "%u-%02u-%02u" % (local_time.tm_year,
                             local_time.tm_mon, local_time.tm_mday)
    str2 = "%02u:%02u:%02u" % (
        local_time.tm_hour, local_time.tm_min, local_time.tm_sec)
    # set the OS clock ...
    cmd = 'date -s "%s %s"' % (str1, str2)
    #print cmd
    subprocess.check_call(cmd, shell=True)
    # ... and persist it to the hardware clock
    hw_cmd = 'hwclock -w'
    #print hw_cmd
    subprocess.check_call(hw_cmd, shell=True)
    print 'OK. set time: %s' % ' '.join([str1, str2])
+
+
def check_localtime_with_internet(url):
    ''' check local time with internet '''
    # maximum tolerated drift, in seconds
    threshold = 2
    # use urllib2 in python2; not use requests which need installation
    response = urllib2.urlopen(url)
    #print response.read()
    # take the Date header of the HTTP response (GMT)
    ts = response.headers['date']
    # parse the date-time portion into a struct_time
    gmt_time = time.strptime(ts[5:25], "%d %b %Y %H:%M:%S")
    # both timestamps below run GMT struct_times through mktime(), so the
    # (timezone-dependent) conversion cancels out in the comparison
    internet_ts = time.mktime(gmt_time)
    local_ts = time.mktime(time.gmtime())
    if abs(local_ts - internet_ts) <= threshold:
        print 'OK. check localtime.'
    else:
        # exit non-zero so callers/scripts can detect the drift
        print 'ERROR! local_ts: %s internet_ts:%s' % (local_ts, internet_ts)
        sys.exit(1)
+
+
if __name__ == '__main__':
    default_url = 'http://www.baidu.com'
    parser = argparse.ArgumentParser()
    parser.description = 'set/check localtime (i.e. CST) with internet'
    parser.add_argument('-c', '--check', action='store_true',
                        help='only check local time')
    parser.add_argument('-s', '--set', action='store_true',
                        help='only set local time')
    parser.add_argument('-u', '--url', default=default_url,
                        help='the url to sync time')
    args = parser.parse_args()
    # default action (no -s) is a read-only check
    if not args.set:
        check_localtime_with_internet(args.url)
    else:
        set_beijing_time_from_web(args.url)
diff --git a/py2018/try_glob.py b/py2018/try_glob.py
new file mode 100644
index 0000000..891cd39
--- /dev/null
+++ b/py2018/try_glob.py
@@ -0,0 +1,18 @@
+import os
+import time
+import glob
+
+
def check_files(program_list=None, the_dir='/tmp/', time_delta=3600):
    '''Print and return files under the_dir whose name contains any entry of
    program_list and whose mtime is within the last time_delta seconds.

    Generalized (backward-compatibly) from hard-coded values: calling
    check_files() behaves exactly as before.  Also returns the list of
    matches so callers can use the result, and uses a py3-compatible print.
    '''
    if program_list is None:
        program_list = ['qemu', 'libvirtd']
    found = []
    threshold = time.time() - time_delta  # hoisted out of the loops
    for name in program_list:
        for path in glob.glob('%s/*%s*' % (the_dir, name)):
            if os.path.getmtime(path) >= threshold:
                print('found file: %s' % path)
                found.append(path)
    return found


if __name__ == '__main__':
    check_files()
diff --git a/py2020/crawler_douban_movie.py b/py2020/crawler_douban_movie.py
new file mode 100644
index 0000000..e5133ba
--- /dev/null
+++ b/py2020/crawler_douban_movie.py
@@ -0,0 +1,48 @@
+import time
+import asyncio
+import aiohttp
+from bs4 import BeautifulSoup
+
+
async def fetch_content(url):
    """Fetch url and return the response body as text (SSL verify disabled)."""
    ua_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:77.0) Gecko/20100101 Firefox/77.0"}
    connector = aiohttp.TCPConnector(ssl=False)
    session = aiohttp.ClientSession(headers=ua_header, connector=connector)
    async with session:
        async with session.get(url) as response:
            return await response.text()
+
+
async def main():
    """Scrape douban's 'coming soon in Hangzhou' page, fetch every movie's
    detail page concurrently, and print name / release date / poster URL.

    NOTE(review): depends on the current page markup (div#showing-soon,
    div.item, second <a>, first <li>) -- breaks if douban changes it.
    """
    url = "https://movie.douban.com/cinema/later/hangzhou/"
    init_page = await fetch_content(url)
    init_soup = BeautifulSoup(init_page, 'lxml')

    movie_names, urls_to_fetch, movie_dates = [], [], []

    all_movies = init_soup.find('div', id="showing-soon")
    for each_movie in all_movies.find_all('div', class_="item"):
        all_a_tag = each_movie.find_all('a')
        all_li_tag = each_movie.find_all('li')

        # second <a> holds the title text and the detail-page link
        movie_names.append(all_a_tag[1].text)
        urls_to_fetch.append(all_a_tag[1]['href'])
        # first <li> holds the release date
        movie_dates.append(all_li_tag[0].text)

    # fetch all detail pages concurrently
    tasks = [fetch_content(url) for url in urls_to_fetch]
    pages = await asyncio.gather(*tasks)

    for movie_name, movie_date, page in zip(movie_names, movie_dates, pages):
        soup_item = BeautifulSoup(page, 'lxml')
        # first <img> on the detail page is the poster
        img_tag = soup_item.find('img')

        print('{} {} {}'.format(movie_name, movie_date, img_tag['src']))
+
+
if __name__ == '__main__':
    # Bug fix: the original tested `'__name__' == '__main__'`, comparing two
    # string literals -- always False, so main() could never run.
    start = time.time()
    print('start: {}'.format(start))
    asyncio.run(main())
    end = time.time()
    print('end: {}'.format(end))
    print('it took {} seconds.'.format(end - start))
diff --git a/py2020/test_decorator.py b/py2020/test_decorator.py
new file mode 100644
index 0000000..4c893d5
--- /dev/null
+++ b/py2020/test_decorator.py
@@ -0,0 +1,31 @@
+from functools import wraps
+
+
+''' useful doc: https://www.runoob.com/w3cnote/python-func-decorators.html '''
+
def logit(logfile='logit.log'):
    """Decorator factory: announce each call of the wrapped function on
    stdout and append the same message to `logfile`.
    """
    def logging_decorator(func):
        @wraps(func)
        def wrapped_function(*args, **kwargs):
            message = func.__name__ + " was called"
            print(message)
            with open(logfile, 'a') as sink:
                sink.write(message + '\n')
            return func(*args, **kwargs)
        return wrapped_function
    return logging_decorator
+
+
@logit()
def myfunc1():
    """No-op; decorated so each call is logged to the default 'logit.log'."""
    pass


@logit(logfile='func2.log')
def myfunc2():
    """No-op; decorated so each call is logged to 'func2.log'."""
    pass


if __name__ == '__main__':
    myfunc1()
    myfunc2()
diff --git a/py2021/my_time_it.py b/py2021/my_time_it.py
new file mode 100644
index 0000000..828fcd3
--- /dev/null
+++ b/py2021/my_time_it.py
@@ -0,0 +1,35 @@
+# *_* coding=utf-8 *_*
+
+import time
+import functools
+
+
+# 装饰器 用于打印函数执行耗时 性能分析很有用
+# 这是为python2 写的; python3中 不要使用time.clock()了
+# DeprecationWarning: time.clock has been deprecated in Python 3.3 and will be removed from Python 3.8: use time.perf_counter or time.process_time instead
def time_it(func):
    """Decorator: print how long each call of func takes.

    Uses time.time() rather than the py2-era time.clock(), which was
    deprecated in 3.3 and removed in 3.8.

    Bug fix: the wrapper now returns func's result -- the original
    discarded it, so every decorated function silently returned None.
    """
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print("function %s() costs %s second(s)" % (func.__name__, end - start))
        return result
    return _wrapper
+
+
@time_it
def test1(x, y):
    """Sleep 1s then return x + y (duration printed by time_it)."""
    time.sleep(1)
    return x + y


@time_it
def test2(x, y):
    """Sleep 3s then return x + y (duration printed by time_it)."""
    time.sleep(3)
    return x + y

if __name__ == '__main__':
    test1(3, 5)
    test2(3, 5)
diff --git a/py2021/test_gc.py b/py2021/test_gc.py
new file mode 100644
index 0000000..b8c6c38
--- /dev/null
+++ b/py2021/test_gc.py
@@ -0,0 +1,42 @@
+import os
+import gc
+import time
+import psutil
+
+
def print_memory_info():
    """Print this process's USS (unique set size) in whole MB."""
    me = psutil.Process(os.getpid())
    full_info = me.memory_full_info()
    mb = 1024 * 1024
    print('used %d MB' % (full_info.uss / mb))
+
def test_func():
    """Allocate a large throwaway list and print memory before/after.

    The list is intentionally unused: the demo is about the allocation
    being reclaimed once the function returns.
    """
    print("test start")
    print_memory_info()
    length = 1000 * 1000
    # renamed from 'list' -- shadowing the builtin made later uses of
    # list() in this module error-prone
    numbers = [i for i in range(length)]
    print_memory_info()
+
def test1_func():
    """Create two large lists that reference each other (a cycle) and print
    memory before/after; the cycle keeps them alive until gc.collect().
    """
    print("test1 start")
    print_memory_info()
    length = 1000 * 1000
    list_a = [i for i in range(length)]
    list_b = [i for i in range(length)]
    list_a.append(list_b)
    list_b.append(list_a)
    print_memory_info()
    # Bug fix: 'return list' returned the *builtin type object* (no local
    # named 'list' exists); the cyclic list_a was clearly intended.
    return list_a
+
+
+test_func()
+print_memory_info()
+test1_func()
+print_memory_info()
+time.sleep(10)
+print_memory_info()
+gc.collect()
+print_memory_info()
diff --git a/py2022/remove_item_in_for_list.py b/py2022/remove_item_in_for_list.py
new file mode 100644
index 0000000..f859037
--- /dev/null
+++ b/py2022/remove_item_in_for_list.py
@@ -0,0 +1,42 @@
# PITFALL: removing from a list while iterating it skips elements -- the
# iterator index advances past items shifted left, so one '2' survives.
list1 = [1, 2, 3, 4, 5, 2, 2]
for i in list1:
    if i == 2:
        list1.remove(i)
print(list1)


# list1 = [1, 2, 3, 4, 5, 2, 2]
# for i in range(len(list1)):
#     if list1[i] == 2:
#         list1.remove(list1[i])
    # IndexError: list index out of range
# print(list1)

# Safe: build a new list with filter()
list1 = [1, 2, 3, 4, 5, 2, 2]
list2 = list(filter(lambda x: x!=2, list1))
print(list2)

# Safe: list comprehension
list1 = [1, 2, 3, 4, 5, 2, 2]
list2 = [i for i in list1 if i!=2]
print(list2)

# Safe: iterate a shallow copy (list1[:]) while mutating the original
list1 = [1, 2, 3, 4, 5, 2, 2]
for i in list1[:]:
    if i == 2:
        list1.remove(i)
print(list1)

import copy

# Safe: iterate the original while mutating a (deep) copy
list1 = [1, 2, 3, 4, 5, 2, 2]
# list2 = copy.copy(list1)
list2 = copy.deepcopy(list1)
for i in list1:
    if i == 2:
        list2.remove(i)
print(list2)

# Safe: repeatedly remove until no match remains
list1 = [1, 2, 3, 4, 5, 2, 2]
while 2 in list1:
    list1.remove(2)
print(list1)
diff --git a/py2022/ssh_cmd.py b/py2022/ssh_cmd.py
new file mode 100644
index 0000000..9015d17
--- /dev/null
+++ b/py2022/ssh_cmd.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# *_* coding=utf-8 *_*
+
+import paramiko
+
hostname_list = ['192.168.1.2', '192.162.1.3']  # replaced by get_hosts() below
username = 'root'
password = 'yourpassword'
# NOTE(review): these reassignments override the credentials above; only
# 'admin' with an empty password is actually used.
username = 'admin'
password = ''
port = 22
+
+
hostname_list = []

def get_hosts(h_file):
    """Read host addresses (one per line) from h_file into hostname_list."""
    with open(h_file) as source:
        for line in source:
            hostname_list.append(line.strip())
+
+
def exec_cmd(cmd):
    ''' exec a cmd on every host in hostname_list over SSH, printing the
    exit status (and stderr/stdout tail on failure) per host '''
    for h in hostname_list:
        client = None
        chan = None
        try:
            client = paramiko.SSHClient()
            client.load_system_host_keys()
            # NOTE(review): passing the MissingHostKeyPolicy base class is
            # probably meant to be paramiko.AutoAddPolicy() -- confirm.
            client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy)
            print('connecting: %s' % h)
            client.connect(h, port=port, username=username, password=password,
                           timeout=5)
            # 多次执行命令 可复用同一个Channel -> reuse one channel per command
            chan = client.get_transport().open_session()
            print('exec cmd: %s' % cmd)
            chan.exec_command(cmd)
            # fetch the exit status once and reuse it (the original called
            # recv_exit_status() twice)
            status = chan.recv_exit_status()
            print('exit code: %d' % status)
            if status == 0:
                print('%s OK' % h)
            else:
                print('%s Error!' % h)
                print(chan.recv(200).strip())
            # stdin, stdout, stderr = client.exec_command(cmd)
            # print(stdout.read().strip())
            # print(stderr.read().strip())
        except Exception as e:
            print(e)
        finally:
            # Bug fix: when connect()/open_session() fails, chan (and in
            # theory client) are unassigned -- the original raised NameError
            # here, masking the real exception.
            if chan is not None:
                chan.close()
            if client is not None:
                client.close()
+
+
if __name__ == '__main__':
    host_file = 'temp_test_ips'
    get_hosts(host_file)
    cmd = 'uptime'
    # NOTE(review): the assignment below overrides 'uptime'; only the
    # compound test command is actually executed.
    cmd = 'echo $(date)>>/tmp/a; sleep 1; uptime; exit 1'
    exec_cmd(cmd)
diff --git a/py2022/ssh_scp.py b/py2022/ssh_scp.py
new file mode 100644
index 0000000..73ba104
--- /dev/null
+++ b/py2022/ssh_scp.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# *_* coding=utf-8 *_*
+
+import paramiko
+
hostname_list = ['192.168.1.2', '192.162.1.3']  # replaced by get_hosts() below
username = 'admin'
password = 'yourpassword'
port = 22
+
+
hostname_list = []

def get_hosts(h_file):
    """Load host addresses (one per line) from h_file into hostname_list."""
    with open(h_file) as src:
        for entry in src:
            hostname_list.append(entry.strip())
+
+
def send_file():
    ''' send (or fetch) a file to (or from) a remote linux machine '''
    src_1 = 'ssh_cmd.py'
    dst_1 = '/tmp/ssh_cmd.py'
    # list of (local source, remote destination) pairs to upload
    put_files = [(src_1, dst_1)]
    for h in hostname_list:
        client = None
        sftp = None
        try:
            client = paramiko.SSHClient()
            client.load_system_host_keys()
            # NOTE(review): paramiko.AutoAddPolicy() is probably what was
            # intended here -- confirm before relying on this policy.
            client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy)
            print('connecting: %s' % h)
            client.connect(h, port=port, username=username, password=password,
                           timeout=5)
            sftp = client.open_sftp()
            for p in put_files:
                print('src file: %s , dst file: %s' % (p[0], p[1]))
                sftp.put(p[0], p[1])
        except Exception as e:
            print(e)
        finally:
            # Bug fix: when connect() fails, sftp (and in theory client) are
            # unassigned -- the original raised NameError here, masking the
            # real exception.
            if sftp is not None:
                sftp.close()
            if client is not None:
                client.close()
+
+
if __name__ == '__main__':
    # read target hosts from a local file, then upload the file(s)
    host_file = 'temp_test_ips'
    get_hosts(host_file)
    send_file()
diff --git a/py2024/calculate_reserved_mem.py b/py2024/calculate_reserved_mem.py
new file mode 100644
index 0000000..a682695
--- /dev/null
+++ b/py2024/calculate_reserved_mem.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python3
+
+import re
+import sys
+from subprocess import Popen, PIPE, STDOUT
+
+
def shell_rc_and_output(cmd):
    """Run cmd through the shell and return (returncode, combined output).

    stdout and stderr are merged; the output is returned as str.
    """
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    out = p.communicate()[0]
    rc = p.returncode
    if isinstance(out, bytes):
        # Python 3: communicate() yields bytes here -- decode to str.
        # (The original comment claimed "out is a string", which was wrong.)
        out = out.decode()
    return rc, str(out)
+
+
def parse_memory_line(line):
    """
    Parse one e820 memory-map line and return (start, end) as ints.

    Example input: "BIOS-e820: [mem 0x0000-0x9fff] usable".
    Returns None when the line has no '[mem start-end]' range, so callers
    must pre-filter (they do, on the substring 'mem').
    """
    # raw string: '\[' / '\]' in a plain string are invalid escapes
    # (DeprecationWarning, SyntaxWarning on 3.12+)
    pattern = r'.+\[mem (.+)-(.+)\].+'
    match = re.search(pattern, line)
    if match:
        # addresses are hexadecimal
        start = int(match.group(1), 16)
        end = int(match.group(2), 16)
        return start, end
+
+
def calculate_reserved_memory(lines):
    """Sum the sizes of all 'reserved' regions (end address inclusive)."""
    sizes = (
        end - start + 1
        for start, end in (
            parse_memory_line(entry)
            for entry in lines
            if 'mem' in entry and 'reserved' in entry
        )
    )
    return sum(sizes)
+
+
def calculate_memory(lines):
    """Sum the sizes of every memory region (end address inclusive)."""
    total = 0
    for entry in lines:
        if 'mem' not in entry:
            continue
        start, end = parse_memory_line(entry)
        # +1 because the end address belongs to the region
        total += end - start + 1
    return total
+
+
def get_e820_mem_map():
    """Return the BIOS-e820 memory-map lines extracted from 'dmesg -T'.

    Each element looks like:
        "BIOS-e820: [mem 0x0000000000000000-0x000000000009ffff] usable"
    Exits with status 1 when the dmesg pipeline fails (e.g. nothing matched).
    """
    cmd = "dmesg -T | grep -Eio ' (bios-e820:.*mem .*)'"
    rc, out = shell_rc_and_output(cmd)
    if rc != 0:
        print("Error! failed to run cmd: {}".format(cmd))
        sys.exit(1)
    # the trailing newline yields a final '' element; downstream filters on
    # the substring 'mem', so it is harmless
    return out.split('\n')
+
+
def print_reserved_ratio(total, reserved):
    """Print reserved/total as a percentage with two decimals."""
    percentage = 100 * (reserved / total)
    print("memory reserved percentage: {:.2f}%".format(percentage))
+
+
if __name__ == '__main__':
    # fetch the BIOS-e820 memory-map lines from dmesg
    mem_map = get_e820_mem_map()
    # total size of all regions, reported in MB
    total_memory = calculate_memory(mem_map)
    total_mem_mb = total_memory / 1024 / 1024
    print("Total memory: {:.0f} MB".format(total_mem_mb))
    # total size of the 'reserved' regions, reported in MB
    reserved_memory = calculate_reserved_memory(mem_map)
    res_mem_mb = reserved_memory / 1024 / 1024
    print("Total reserved memory: {:.0f} MB".format(res_mem_mb))
    print_reserved_ratio(total_mem_mb, res_mem_mb)
diff --git a/utils/run_cmd.py b/utils/run_cmd.py
new file mode 100644
index 0000000..d758e98
--- /dev/null
+++ b/utils/run_cmd.py
@@ -0,0 +1,22 @@
+from subprocess import Popen, PIPE, STDOUT
+
def shell_output(cmd):
    """Run cmd through the shell and return its combined stdout/stderr
    (bytes on Python 3)."""
    proc = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    stdout_data, _ = proc.communicate()
    return stdout_data
+
+
def shell_rc_and_output(cmd):
    """Run cmd through the shell; return (returncode, combined output).

    stdout/stderr are merged and returned raw (bytes on Python 3).
    """
    proc = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    captured = proc.communicate()[0]
    return proc.returncode, captured
+
+
if __name__ == "__main__":
    # quick smoke test of both helpers
    cmd = 'ls -l'
    print(shell_output(cmd))
    rc, out = shell_rc_and_output(cmd)
    print(rc)
    print('rc: {}, out: {}'.format(rc, out))
diff --git a/wsgi_benchmarking_2018/Bjoern.wsgi b/wsgi_benchmarking_2018/Bjoern.wsgi
new file mode 100644
index 0000000..f3cb9b1
--- /dev/null
+++ b/wsgi_benchmarking_2018/Bjoern.wsgi
@@ -0,0 +1,9 @@
import bjoern
from app import application

# Serve the benchmark WSGI app with bjoern on all interfaces, port 8000.
# reuse_port lets multiple bjoern processes share the same listening socket.
bjoern.run(
    wsgi_app=application,
    host='0.0.0.0',
    port=8000,
    reuse_port=True
)
diff --git a/wsgi_benchmarking_2018/CherryPy.wsgi b/wsgi_benchmarking_2018/CherryPy.wsgi
new file mode 100644
index 0000000..082feca
--- /dev/null
+++ b/wsgi_benchmarking_2018/CherryPy.wsgi
@@ -0,0 +1,21 @@
import socket
# Prefer the maintained cheroot server; fall back to the server bundled
# with older CherryPy releases.
try:
    from cheroot.wsgi import Server as WSGIServer
except ImportError:
    from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer
from app import application

# Serve the benchmark app on port 8100 with a deep accept queue.
server = WSGIServer(
    bind_addr=('0.0.0.0', 8100),
    wsgi_app=application,
    request_queue_size=500,
    server_name=socket.gethostname()
)

if __name__ == '__main__':
    try:
        server.start()
    except KeyboardInterrupt:
        pass
    finally:
        server.stop()
diff --git a/wsgi_benchmarking_2018/Gunicorn-meinheld.sh b/wsgi_benchmarking_2018/Gunicorn-meinheld.sh
new file mode 100755
index 0000000..bef639f
--- /dev/null
+++ b/wsgi_benchmarking_2018/Gunicorn-meinheld.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -x
+PROCESSOR_COUNT=$(nproc)
+# This formula is recommended in the Gunicorn documentation
+# http://docs.gunicorn.org/en/stable/design.html#how-many-workers
+GUNICORN_WORKER_COUNT=$(( PROCESSOR_COUNT * 2 + 1 ))
+
+gunicorn -w ${GUNICORN_WORKER_COUNT} --worker-class="meinheld.gmeinheld.MeinheldWorker" -b 0.0.0.0:8400 app:application
diff --git a/wsgi_benchmarking_2018/Gunicorn.sh b/wsgi_benchmarking_2018/Gunicorn.sh
new file mode 100755
index 0000000..1cd63a9
--- /dev/null
+++ b/wsgi_benchmarking_2018/Gunicorn.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -x
+PROCESSOR_COUNT=$(nproc)
+# This formula is recommended in the Gunicorn documentation
+# http://docs.gunicorn.org/en/stable/design.html#how-many-workers
+GUNICORN_WORKER_COUNT=$(( PROCESSOR_COUNT * 2 + 1 ))
+
+gunicorn -w ${GUNICORN_WORKER_COUNT} -b 0.0.0.0:8200 app:application
diff --git a/wsgi_benchmarking_2018/app.py b/wsgi_benchmarking_2018/app.py
new file mode 100644
index 0000000..697628e
--- /dev/null
+++ b/wsgi_benchmarking_2018/app.py
@@ -0,0 +1,11 @@
def application(environment, start_response):
    """
    The main WSGI Application. Doesn't really do anything
    since we're benchmarking the servers, not this code :)
    """

    start_response(
        '200 OK',  # Status
        [('Content-type', 'text/plain'), ('Content-Length', '2')]  # Headers
    )
    # Fix: WSGI response bodies must be byte strings.  b'OK' is identical
    # to 'OK' on Python 2 and correct (instead of silently wrong) on
    # Python 3; Content-Length (2) still matches.
    return [b'OK']
diff --git a/wsgi_benchmarking_2018/benchmark.sh b/wsgi_benchmarking_2018/benchmark.sh
new file mode 100755
index 0000000..4b18c3e
--- /dev/null
+++ b/wsgi_benchmarking_2018/benchmark.sh
@@ -0,0 +1,23 @@
#!/bin/bash

# Drive wrk against each WSGI server (one per port) at increasing
# connection counts; one log file per (port, threads, connections) run.
IP=192.168.122.140
PORTS=(8000 8100 8200 8300 8400)
CONNECTIONS=(100 500 1000 5000 10000)
THREADS=8
DURATION=30
BASE=$1

# raise the fd limit so wrk can open up to 10000 connections
ulimit -n 10240


# $1 = threads, $2 = connections, $3 = port
function perf() {
    echo " Testing with $1 threads and $2 connections ..."
    ./wrk --duration $DURATION --threads $1 --connections "$2" "http://$IP:$3" > "$3_$1_$2.log"
}

for connections in "${CONNECTIONS[@]}"; do
    for port in "${PORTS[@]}"; do
        perf "$THREADS" "$connections" "$port"
        sleep 1
    done
done
diff --git a/wsgi_benchmarking_2018/uWSGI.sh b/wsgi_benchmarking_2018/uWSGI.sh
new file mode 100755
index 0000000..a407b01
--- /dev/null
+++ b/wsgi_benchmarking_2018/uWSGI.sh
@@ -0,0 +1,7 @@
#!/bin/bash
# The official documentation doesn't have any word on this,
# but this seems to be a common practice
PROCESSOR_COUNT=$(nproc)
THREAD_COUNT=2

# Serve app.py over HTTP on port 8300 using the python2 plugin.
uwsgi --http :8300 --plugin python2 --wsgi-file app.py --processes "$PROCESSOR_COUNT" --threads "$THREAD_COUNT" --disable-logging